]> git.ipfire.org Git - thirdparty/qemu.git/blob - linux-user/syscall.c
linux-user: make pwrite64/pread64(fd, NULL, 0, offset) return 0
[thirdparty/qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112
113 #include "qemu.h"
114
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
118
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
126 */
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
129 */
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
137 */
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
140
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
157
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164 */
165
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
169 */
170 //#define DEBUG_ERESTARTSYS
171
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
175
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183
184 #define _syscall0(type,name) \
185 static type name (void) \
186 { \
187 return syscall(__NR_##name); \
188 }
189
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
192 { \
193 return syscall(__NR_##name, arg1); \
194 }
195
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
198 { \
199 return syscall(__NR_##name, arg1, arg2); \
200 }
201
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
204 { \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
206 }
207
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
210 { \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
212 }
213
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
215 type5,arg5) \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
217 { \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
219 }
220
221
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
225 type6 arg6) \
226 { \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
228 }
229
230
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257 errno. */
/* Host-gettid fallback for libcs without __NR_gettid.
 * NOTE(review): unlike the real syscall (-1 with errno set), this
 * returns -ENOSYS directly, so get_errno() will pass the host value
 * through without target-errno translation -- confirm callers expect
 * that.
 */
static int gettid(void) {
    return -ENOSYS;
}
261 #endif
262
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
268 */
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
272
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
283 loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287 siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297 const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301 unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304 unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308 void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310 struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312 struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
322
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325 unsigned long, idx1, unsigned long, idx2)
326 #endif
327
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
330 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
331 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
332 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
333 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
334 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
335 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
336 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
337 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
338 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
339 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
340 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
344 #endif
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
347 #endif
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
350 #endif
351 #if defined(O_PATH)
352 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
353 #endif
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
356 #endif
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361 { 0, 0, 0, 0 }
362 };
363
364 enum {
365 QEMU_IFLA_BR_UNSPEC,
366 QEMU_IFLA_BR_FORWARD_DELAY,
367 QEMU_IFLA_BR_HELLO_TIME,
368 QEMU_IFLA_BR_MAX_AGE,
369 QEMU_IFLA_BR_AGEING_TIME,
370 QEMU_IFLA_BR_STP_STATE,
371 QEMU_IFLA_BR_PRIORITY,
372 QEMU_IFLA_BR_VLAN_FILTERING,
373 QEMU_IFLA_BR_VLAN_PROTOCOL,
374 QEMU_IFLA_BR_GROUP_FWD_MASK,
375 QEMU_IFLA_BR_ROOT_ID,
376 QEMU_IFLA_BR_BRIDGE_ID,
377 QEMU_IFLA_BR_ROOT_PORT,
378 QEMU_IFLA_BR_ROOT_PATH_COST,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381 QEMU_IFLA_BR_HELLO_TIMER,
382 QEMU_IFLA_BR_TCN_TIMER,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384 QEMU_IFLA_BR_GC_TIMER,
385 QEMU_IFLA_BR_GROUP_ADDR,
386 QEMU_IFLA_BR_FDB_FLUSH,
387 QEMU_IFLA_BR_MCAST_ROUTER,
388 QEMU_IFLA_BR_MCAST_SNOOPING,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390 QEMU_IFLA_BR_MCAST_QUERIER,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392 QEMU_IFLA_BR_MCAST_HASH_MAX,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405 QEMU_IFLA_BR_PAD,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408 QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409 QEMU_IFLA_BR_MCAST_MLD_VERSION,
410 QEMU___IFLA_BR_MAX,
411 };
412
413 enum {
414 QEMU_IFLA_UNSPEC,
415 QEMU_IFLA_ADDRESS,
416 QEMU_IFLA_BROADCAST,
417 QEMU_IFLA_IFNAME,
418 QEMU_IFLA_MTU,
419 QEMU_IFLA_LINK,
420 QEMU_IFLA_QDISC,
421 QEMU_IFLA_STATS,
422 QEMU_IFLA_COST,
423 QEMU_IFLA_PRIORITY,
424 QEMU_IFLA_MASTER,
425 QEMU_IFLA_WIRELESS,
426 QEMU_IFLA_PROTINFO,
427 QEMU_IFLA_TXQLEN,
428 QEMU_IFLA_MAP,
429 QEMU_IFLA_WEIGHT,
430 QEMU_IFLA_OPERSTATE,
431 QEMU_IFLA_LINKMODE,
432 QEMU_IFLA_LINKINFO,
433 QEMU_IFLA_NET_NS_PID,
434 QEMU_IFLA_IFALIAS,
435 QEMU_IFLA_NUM_VF,
436 QEMU_IFLA_VFINFO_LIST,
437 QEMU_IFLA_STATS64,
438 QEMU_IFLA_VF_PORTS,
439 QEMU_IFLA_PORT_SELF,
440 QEMU_IFLA_AF_SPEC,
441 QEMU_IFLA_GROUP,
442 QEMU_IFLA_NET_NS_FD,
443 QEMU_IFLA_EXT_MASK,
444 QEMU_IFLA_PROMISCUITY,
445 QEMU_IFLA_NUM_TX_QUEUES,
446 QEMU_IFLA_NUM_RX_QUEUES,
447 QEMU_IFLA_CARRIER,
448 QEMU_IFLA_PHYS_PORT_ID,
449 QEMU_IFLA_CARRIER_CHANGES,
450 QEMU_IFLA_PHYS_SWITCH_ID,
451 QEMU_IFLA_LINK_NETNSID,
452 QEMU_IFLA_PHYS_PORT_NAME,
453 QEMU_IFLA_PROTO_DOWN,
454 QEMU_IFLA_GSO_MAX_SEGS,
455 QEMU_IFLA_GSO_MAX_SIZE,
456 QEMU_IFLA_PAD,
457 QEMU_IFLA_XDP,
458 QEMU_IFLA_EVENT,
459 QEMU_IFLA_NEW_NETNSID,
460 QEMU_IFLA_IF_NETNSID,
461 QEMU_IFLA_CARRIER_UP_COUNT,
462 QEMU_IFLA_CARRIER_DOWN_COUNT,
463 QEMU_IFLA_NEW_IFINDEX,
464 QEMU___IFLA_MAX
465 };
466
467 enum {
468 QEMU_IFLA_BRPORT_UNSPEC,
469 QEMU_IFLA_BRPORT_STATE,
470 QEMU_IFLA_BRPORT_PRIORITY,
471 QEMU_IFLA_BRPORT_COST,
472 QEMU_IFLA_BRPORT_MODE,
473 QEMU_IFLA_BRPORT_GUARD,
474 QEMU_IFLA_BRPORT_PROTECT,
475 QEMU_IFLA_BRPORT_FAST_LEAVE,
476 QEMU_IFLA_BRPORT_LEARNING,
477 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478 QEMU_IFLA_BRPORT_PROXYARP,
479 QEMU_IFLA_BRPORT_LEARNING_SYNC,
480 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481 QEMU_IFLA_BRPORT_ROOT_ID,
482 QEMU_IFLA_BRPORT_BRIDGE_ID,
483 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484 QEMU_IFLA_BRPORT_DESIGNATED_COST,
485 QEMU_IFLA_BRPORT_ID,
486 QEMU_IFLA_BRPORT_NO,
487 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488 QEMU_IFLA_BRPORT_CONFIG_PENDING,
489 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491 QEMU_IFLA_BRPORT_HOLD_TIMER,
492 QEMU_IFLA_BRPORT_FLUSH,
493 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494 QEMU_IFLA_BRPORT_PAD,
495 QEMU_IFLA_BRPORT_MCAST_FLOOD,
496 QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497 QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498 QEMU_IFLA_BRPORT_BCAST_FLOOD,
499 QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501 QEMU___IFLA_BRPORT_MAX
502 };
503
504 enum {
505 QEMU_IFLA_INFO_UNSPEC,
506 QEMU_IFLA_INFO_KIND,
507 QEMU_IFLA_INFO_DATA,
508 QEMU_IFLA_INFO_XSTATS,
509 QEMU_IFLA_INFO_SLAVE_KIND,
510 QEMU_IFLA_INFO_SLAVE_DATA,
511 QEMU___IFLA_INFO_MAX,
512 };
513
514 enum {
515 QEMU_IFLA_INET_UNSPEC,
516 QEMU_IFLA_INET_CONF,
517 QEMU___IFLA_INET_MAX,
518 };
519
520 enum {
521 QEMU_IFLA_INET6_UNSPEC,
522 QEMU_IFLA_INET6_FLAGS,
523 QEMU_IFLA_INET6_CONF,
524 QEMU_IFLA_INET6_STATS,
525 QEMU_IFLA_INET6_MCAST,
526 QEMU_IFLA_INET6_CACHEINFO,
527 QEMU_IFLA_INET6_ICMP6STATS,
528 QEMU_IFLA_INET6_TOKEN,
529 QEMU_IFLA_INET6_ADDR_GEN_MODE,
530 QEMU___IFLA_INET6_MAX
531 };
532
533 enum {
534 QEMU_IFLA_XDP_UNSPEC,
535 QEMU_IFLA_XDP_FD,
536 QEMU_IFLA_XDP_ATTACHED,
537 QEMU_IFLA_XDP_FLAGS,
538 QEMU_IFLA_XDP_PROG_ID,
539 QEMU___IFLA_XDP_MAX,
540 };
541
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545 TargetFdDataFunc host_to_target_data;
546 TargetFdDataFunc target_to_host_data;
547 TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
549
550 static TargetFdTrans **target_fd_trans;
551
552 static unsigned int target_fd_max;
553
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
555 {
556 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557 return target_fd_trans[fd]->target_to_host_data;
558 }
559 return NULL;
560 }
561
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
563 {
564 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565 return target_fd_trans[fd]->host_to_target_data;
566 }
567 return NULL;
568 }
569
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
571 {
572 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573 return target_fd_trans[fd]->target_to_host_addr;
574 }
575 return NULL;
576 }
577
/* Associate translation descriptor trans with host fd, growing the
 * lookup table on demand.  Only the pointer is stored; trans stays
 * owned by the caller.
 * NOTE(review): assumes fd >= 0 -- a negative fd would be compared here
 * as a huge unsigned value and then used as a negative array index;
 * confirm all call sites pass validated fds.
 */
static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        /* Round up so nearby registrations share one reallocation. */
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        /* Zero the new tail so unregistered fds read back as NULL. */
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}
592
593 static void fd_trans_unregister(int fd)
594 {
595 if (fd >= 0 && fd < target_fd_max) {
596 target_fd_trans[fd] = NULL;
597 }
598 }
599
600 static void fd_trans_dup(int oldfd, int newfd)
601 {
602 fd_trans_unregister(newfd);
603 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604 fd_trans_register(newfd, target_fd_trans[oldfd]);
605 }
606 }
607
/* getcwd() shim matching the kernel convention: on success return the
 * length of the result including the trailing NUL; on failure return -1
 * with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(cwd) + 1;
}
616
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
621 const struct timespec *,tsp,int,flags)
622 #else
/* Stub used when the host has no utimensat syscall: always fail with
 * ENOSYS, following the -1/errno syscall convention. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
629 #endif
630 #endif /* TARGET_NR_utimensat */
631
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
636 const char *, new, unsigned int, flags)
637 #else
638 static int sys_renameat2(int oldfd, const char *old,
639 int newfd, const char *new, int flags)
640 {
641 if (flags == 0) {
642 return renameat(oldfd, old, newfd, new);
643 }
644 errno = ENOSYS;
645 return -1;
646 }
647 #endif
648 #endif /* TARGET_NR_renameat2 */
649
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
652
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher can call inotify_init(). */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
658 #endif
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper forwarding to the host inotify_add_watch(). */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
664 #endif
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper forwarding to the host inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
670 #endif
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper forwarding to the host inotify_init1(). */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
677 #endif
678 #endif
679 #else
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY */
686
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
690 #endif
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be that used by the underlying syscall */
693 struct host_rlimit64 {
694 uint64_t rlim_cur;
695 uint64_t rlim_max;
696 };
697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
698 const struct host_rlimit64 *, new_limit,
699 struct host_rlimit64 *, old_limit)
700 #endif
701
702
703 #if defined(TARGET_NR_timer_create)
704 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers[32] = { 0, } ;
706
/* Return the index of a free slot in g_posix_timers, reserving it with
 * the dummy value 1 (the real timer_t overwrites this on creation), or
 * -1 when all slots are in use.  A slot value of 0 means "free".
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* Reserve immediately so a concurrent caller cannot pick the
             * same index -- though the check-then-set itself is not
             * atomic, as the FIXME above notes. */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
719 #endif
720
721 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
722 #ifdef TARGET_ARM
723 static inline int regpairs_aligned(void *cpu_env, int num)
724 {
725 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
726 }
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
731 * of registers which translates to the same as ARM/MIPS, because we start with
732 * r3 as arg1 */
733 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env, int num)
737 {
738 switch (num) {
739 case TARGET_NR_pread64:
740 case TARGET_NR_pwrite64:
741 return 1;
742
743 default:
744 return 0;
745 }
746 }
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
749 #else
750 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
751 #endif
752
753 #define ERRNO_TABLE_SIZE 1200
754
755 /* target_to_host_errno_table[] is initialized from
756 * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
758 };
759
760 /*
761 * This list is the union of errno values overridden in asm-<arch>/errno.h
762 * minus the errnos that are not actually generic to all archs.
763 */
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765 [EAGAIN] = TARGET_EAGAIN,
766 [EIDRM] = TARGET_EIDRM,
767 [ECHRNG] = TARGET_ECHRNG,
768 [EL2NSYNC] = TARGET_EL2NSYNC,
769 [EL3HLT] = TARGET_EL3HLT,
770 [EL3RST] = TARGET_EL3RST,
771 [ELNRNG] = TARGET_ELNRNG,
772 [EUNATCH] = TARGET_EUNATCH,
773 [ENOCSI] = TARGET_ENOCSI,
774 [EL2HLT] = TARGET_EL2HLT,
775 [EDEADLK] = TARGET_EDEADLK,
776 [ENOLCK] = TARGET_ENOLCK,
777 [EBADE] = TARGET_EBADE,
778 [EBADR] = TARGET_EBADR,
779 [EXFULL] = TARGET_EXFULL,
780 [ENOANO] = TARGET_ENOANO,
781 [EBADRQC] = TARGET_EBADRQC,
782 [EBADSLT] = TARGET_EBADSLT,
783 [EBFONT] = TARGET_EBFONT,
784 [ENOSTR] = TARGET_ENOSTR,
785 [ENODATA] = TARGET_ENODATA,
786 [ETIME] = TARGET_ETIME,
787 [ENOSR] = TARGET_ENOSR,
788 [ENONET] = TARGET_ENONET,
789 [ENOPKG] = TARGET_ENOPKG,
790 [EREMOTE] = TARGET_EREMOTE,
791 [ENOLINK] = TARGET_ENOLINK,
792 [EADV] = TARGET_EADV,
793 [ESRMNT] = TARGET_ESRMNT,
794 [ECOMM] = TARGET_ECOMM,
795 [EPROTO] = TARGET_EPROTO,
796 [EDOTDOT] = TARGET_EDOTDOT,
797 [EMULTIHOP] = TARGET_EMULTIHOP,
798 [EBADMSG] = TARGET_EBADMSG,
799 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
800 [EOVERFLOW] = TARGET_EOVERFLOW,
801 [ENOTUNIQ] = TARGET_ENOTUNIQ,
802 [EBADFD] = TARGET_EBADFD,
803 [EREMCHG] = TARGET_EREMCHG,
804 [ELIBACC] = TARGET_ELIBACC,
805 [ELIBBAD] = TARGET_ELIBBAD,
806 [ELIBSCN] = TARGET_ELIBSCN,
807 [ELIBMAX] = TARGET_ELIBMAX,
808 [ELIBEXEC] = TARGET_ELIBEXEC,
809 [EILSEQ] = TARGET_EILSEQ,
810 [ENOSYS] = TARGET_ENOSYS,
811 [ELOOP] = TARGET_ELOOP,
812 [ERESTART] = TARGET_ERESTART,
813 [ESTRPIPE] = TARGET_ESTRPIPE,
814 [ENOTEMPTY] = TARGET_ENOTEMPTY,
815 [EUSERS] = TARGET_EUSERS,
816 [ENOTSOCK] = TARGET_ENOTSOCK,
817 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
818 [EMSGSIZE] = TARGET_EMSGSIZE,
819 [EPROTOTYPE] = TARGET_EPROTOTYPE,
820 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
821 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
822 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
823 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
824 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
825 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
826 [EADDRINUSE] = TARGET_EADDRINUSE,
827 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
828 [ENETDOWN] = TARGET_ENETDOWN,
829 [ENETUNREACH] = TARGET_ENETUNREACH,
830 [ENETRESET] = TARGET_ENETRESET,
831 [ECONNABORTED] = TARGET_ECONNABORTED,
832 [ECONNRESET] = TARGET_ECONNRESET,
833 [ENOBUFS] = TARGET_ENOBUFS,
834 [EISCONN] = TARGET_EISCONN,
835 [ENOTCONN] = TARGET_ENOTCONN,
836 [EUCLEAN] = TARGET_EUCLEAN,
837 [ENOTNAM] = TARGET_ENOTNAM,
838 [ENAVAIL] = TARGET_ENAVAIL,
839 [EISNAM] = TARGET_EISNAM,
840 [EREMOTEIO] = TARGET_EREMOTEIO,
841 [EDQUOT] = TARGET_EDQUOT,
842 [ESHUTDOWN] = TARGET_ESHUTDOWN,
843 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
844 [ETIMEDOUT] = TARGET_ETIMEDOUT,
845 [ECONNREFUSED] = TARGET_ECONNREFUSED,
846 [EHOSTDOWN] = TARGET_EHOSTDOWN,
847 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
848 [EALREADY] = TARGET_EALREADY,
849 [EINPROGRESS] = TARGET_EINPROGRESS,
850 [ESTALE] = TARGET_ESTALE,
851 [ECANCELED] = TARGET_ECANCELED,
852 [ENOMEDIUM] = TARGET_ENOMEDIUM,
853 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855 [ENOKEY] = TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861 [EKEYREVOKED] = TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864 [EKEYREJECTED] = TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867 [EOWNERDEAD] = TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873 [ENOMSG] = TARGET_ENOMSG,
874 #endif
875 #ifdef ERKFILL
876 [ERFKILL] = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879 [EHWPOISON] = TARGET_EHWPOISON,
880 #endif
881 };
882
883 static inline int host_to_target_errno(int err)
884 {
885 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886 host_to_target_errno_table[err]) {
887 return host_to_target_errno_table[err];
888 }
889 return err;
890 }
891
892 static inline int target_to_host_errno(int err)
893 {
894 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895 target_to_host_errno_table[err]) {
896 return target_to_host_errno_table[err];
897 }
898 return err;
899 }
900
901 static inline abi_long get_errno(abi_long ret)
902 {
903 if (ret == -1)
904 return -host_to_target_errno(errno);
905 else
906 return ret;
907 }
908
909 const char *target_strerror(int err)
910 {
911 if (err == TARGET_ERESTARTSYS) {
912 return "To be restarted";
913 }
914 if (err == TARGET_QEMU_ESIGRETURN) {
915 return "Successful exit from sigreturn";
916 }
917
918 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919 return NULL;
920 }
921 return strerror(target_to_host_errno(err));
922 }
923
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
926 { \
927 return safe_syscall(__NR_##name); \
928 }
929
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
932 { \
933 return safe_syscall(__NR_##name, arg1); \
934 }
935
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
938 { \
939 return safe_syscall(__NR_##name, arg1, arg2); \
940 }
941
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
944 { \
945 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
946 }
947
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
949 type4, arg4) \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
951 { \
952 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
953 }
954
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956 type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
958 type5 arg5) \
959 { \
960 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
961 }
962
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964 type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966 type5 arg5, type6 arg6) \
967 { \
968 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
969 }
970
/* Instantiate safe_<name>() wrappers for the syscalls used below.
 * Each line expands (via the safe_syscallN macros above) into a static
 * function safe_<name>() with the listed prototype.
 */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
/* msgsnd via the single ipc() multiplexer: version 0, op MSGSND.
 * Argument order matches the kernel's sys_ipc() expectations.
 */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv via ipc(): version 1 passes the message type directly as the
 * fifth argument rather than via the version-0 "ipc kludge" struct.
 */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop via ipc(): the timeout pointer travels in the "fifth"
 * slot, cast to long as sys_ipc() expects.
 */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
1055 #endif
/* POSIX message queue wrappers, only when both target and host support
 * the mq_* syscall family.
 */
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1077
1078 static inline int host_to_target_sock_type(int host_type)
1079 {
1080 int target_type;
1081
1082 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083 case SOCK_DGRAM:
1084 target_type = TARGET_SOCK_DGRAM;
1085 break;
1086 case SOCK_STREAM:
1087 target_type = TARGET_SOCK_STREAM;
1088 break;
1089 default:
1090 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091 break;
1092 }
1093
1094 #if defined(SOCK_CLOEXEC)
1095 if (host_type & SOCK_CLOEXEC) {
1096 target_type |= TARGET_SOCK_CLOEXEC;
1097 }
1098 #endif
1099
1100 #if defined(SOCK_NONBLOCK)
1101 if (host_type & SOCK_NONBLOCK) {
1102 target_type |= TARGET_SOCK_NONBLOCK;
1103 }
1104 #endif
1105
1106 return target_type;
1107 }
1108
/* State for the brk emulation below (guest addresses). */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break; brk never goes below this */
static abi_ulong brk_page;            /* end of the host pages already reserved for the heap */
1112
1113 void target_set_brk(abi_ulong new_brk)
1114 {
1115 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1116 brk_page = HOST_PAGE_ALIGN(target_brk);
1117 }
1118
/* Uncomment the first definition to trace brk emulation to stderr. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
1121
/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest brk(2): grows (or queries) the guest heap,
 * mapping additional anonymous host memory when the request goes
 * past the pages already reserved in brk_page.  On any failure the
 * previous break is returned (except on Alpha, see below).
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break unchanged. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break may never move below its initial value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
1199
1200 static inline abi_long copy_from_user_fdset(fd_set *fds,
1201 abi_ulong target_fds_addr,
1202 int n)
1203 {
1204 int i, nw, j, k;
1205 abi_ulong b, *target_fds;
1206
1207 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1208 if (!(target_fds = lock_user(VERIFY_READ,
1209 target_fds_addr,
1210 sizeof(abi_ulong) * nw,
1211 1)))
1212 return -TARGET_EFAULT;
1213
1214 FD_ZERO(fds);
1215 k = 0;
1216 for (i = 0; i < nw; i++) {
1217 /* grab the abi_ulong */
1218 __get_user(b, &target_fds[i]);
1219 for (j = 0; j < TARGET_ABI_BITS; j++) {
1220 /* check the bit inside the abi_ulong */
1221 if ((b >> j) & 1)
1222 FD_SET(k, fds);
1223 k++;
1224 }
1225 }
1226
1227 unlock_user(target_fds, target_fds_addr, 0);
1228
1229 return 0;
1230 }
1231
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233 abi_ulong target_fds_addr,
1234 int n)
1235 {
1236 if (target_fds_addr) {
1237 if (copy_from_user_fdset(fds, target_fds_addr, n))
1238 return -TARGET_EFAULT;
1239 *fds_ptr = fds;
1240 } else {
1241 *fds_ptr = NULL;
1242 }
1243 return 0;
1244 }
1245
1246 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1247 const fd_set *fds,
1248 int n)
1249 {
1250 int i, nw, j, k;
1251 abi_long v;
1252 abi_ulong *target_fds;
1253
1254 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255 if (!(target_fds = lock_user(VERIFY_WRITE,
1256 target_fds_addr,
1257 sizeof(abi_ulong) * nw,
1258 0)))
1259 return -TARGET_EFAULT;
1260
1261 k = 0;
1262 for (i = 0; i < nw; i++) {
1263 v = 0;
1264 for (j = 0; j < TARGET_ABI_BITS; j++) {
1265 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1266 k++;
1267 }
1268 __put_user(v, &target_fds[i]);
1269 }
1270
1271 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1272
1273 return 0;
1274 }
1275
/* Host kernel clock tick rate used to scale clock_t values below
 * (Alpha kernels tick at 1024 Hz, everything else here assumes 100).
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1281
/* Rescale a host clock_t tick count to the target's HZ.  Widened to
 * int64_t before multiplying to avoid overflow in the intermediate.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1290
/* Copy a host struct rusage out to the guest struct at target_addr,
 * byte-swapping every field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1320
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1322 {
1323 abi_ulong target_rlim_swap;
1324 rlim_t result;
1325
1326 target_rlim_swap = tswapal(target_rlim);
1327 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328 return RLIM_INFINITY;
1329
1330 result = target_rlim_swap;
1331 if (target_rlim_swap != (rlim_t)result)
1332 return RLIM_INFINITY;
1333
1334 return result;
1335 }
1336
1337 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1338 {
1339 abi_ulong target_rlim_swap;
1340 abi_ulong result;
1341
1342 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1343 target_rlim_swap = TARGET_RLIM_INFINITY;
1344 else
1345 target_rlim_swap = rlim;
1346 result = tswapal(target_rlim_swap);
1347
1348 return result;
1349 }
1350
/* Map a guest RLIMIT_* resource code to the host constant.  Unknown
 * codes are passed through unchanged and left for the host syscall
 * to reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1388
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390 abi_ulong target_tv_addr)
1391 {
1392 struct target_timeval *target_tv;
1393
1394 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395 return -TARGET_EFAULT;
1396
1397 __get_user(tv->tv_sec, &target_tv->tv_sec);
1398 __get_user(tv->tv_usec, &target_tv->tv_usec);
1399
1400 unlock_user_struct(target_tv, target_tv_addr, 0);
1401
1402 return 0;
1403 }
1404
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406 const struct timeval *tv)
1407 {
1408 struct target_timeval *target_tv;
1409
1410 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411 return -TARGET_EFAULT;
1412
1413 __put_user(tv->tv_sec, &target_tv->tv_sec);
1414 __put_user(tv->tv_usec, &target_tv->tv_usec);
1415
1416 unlock_user_struct(target_tv, target_tv_addr, 1);
1417
1418 return 0;
1419 }
1420
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422 abi_ulong target_tz_addr)
1423 {
1424 struct target_timezone *target_tz;
1425
1426 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427 return -TARGET_EFAULT;
1428 }
1429
1430 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1432
1433 unlock_user_struct(target_tz, target_tz_addr, 0);
1434
1435 return 0;
1436 }
1437
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1439 #include <mqueue.h>
1440
1441 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1442 abi_ulong target_mq_attr_addr)
1443 {
1444 struct target_mq_attr *target_mq_attr;
1445
1446 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1447 target_mq_attr_addr, 1))
1448 return -TARGET_EFAULT;
1449
1450 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1451 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1452 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1453 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1454
1455 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1456
1457 return 0;
1458 }
1459
1460 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1461 const struct mq_attr *attr)
1462 {
1463 struct target_mq_attr *target_mq_attr;
1464
1465 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1466 target_mq_attr_addr, 0))
1467 return -TARGET_EFAULT;
1468
1469 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1470 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1471 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1472 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1473
1474 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1475
1476 return 0;
1477 }
1478 #endif
1479
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates select(2) on top of the host pselect6 wrapper: each of the
 * three fd_set addresses and the timeval address may be 0 (NULL).  On
 * success the fd_sets and (Linux select semantics) the remaining
 * timeout are copied back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* pselect6 takes a timespec; convert the guest's timeval. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* sig == NULL: no signal mask change, plain select behaviour. */
    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* The kernel updated ts with the remaining time; report it. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1537
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long do_old_select(abi_ulong arg1)
1540 {
1541 struct target_sel_arg_struct *sel;
1542 abi_ulong inp, outp, exp, tvp;
1543 long nsel;
1544
1545 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1546 return -TARGET_EFAULT;
1547 }
1548
1549 nsel = tswapal(sel->n);
1550 inp = tswapal(sel->inp);
1551 outp = tswapal(sel->outp);
1552 exp = tswapal(sel->exp);
1553 tvp = tswapal(sel->tvp);
1554
1555 unlock_user_struct(sel, arg1, 0);
1556
1557 return do_select(nsel, inp, outp, exp, tvp);
1558 }
1559 #endif
1560 #endif
1561
/* Create a pipe with flags via the host pipe2() when the build found
 * it (CONFIG_PIPE2); otherwise report ENOSYS.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1570
/* Common implementation for the guest pipe and pipe2 syscalls.
 * Creates the host pipe, then delivers the two fds to the guest either
 * via its special per-architecture register convention (original pipe
 * on Alpha/MIPS/SH4/SPARC returns fd0 and puts fd1 in a second
 * register) or by writing both into the int array at pipedes.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1604
1605 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1606 abi_ulong target_addr,
1607 socklen_t len)
1608 {
1609 struct target_ip_mreqn *target_smreqn;
1610
1611 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1612 if (!target_smreqn)
1613 return -TARGET_EFAULT;
1614 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1615 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1616 if (len == sizeof(struct target_ip_mreqn))
1617 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1618 unlock_user(target_smreqn, target_addr, 0);
1619
1620 return 0;
1621 }
1622
/* Convert a guest sockaddr at target_addr (len bytes) into the host
 * struct at *addr, fixing up the family field and the family-specific
 * byte-swapped members.  If the fd has a registered address translator
 * (fd_trans), that hook does the whole conversion instead.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Grow len by one if the path's last byte is non-NUL but a
             * NUL terminator immediately follows within the buffer. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* Fix up the members that are not plain byte strings. */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1679
/* Copy a host sockaddr out to guest memory at target_addr (len bytes),
 * byte-swapping the family field and the family-specific members.
 * A zero len is a no-op (the kernel may report len 0, e.g. for an
 * unbound socket).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the buffer is big enough to contain it;
     * the kernel can legitimately hand back a truncated address. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1717
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into the pre-allocated host control buffer in *msgh.  SCM_RIGHTS fd
 * arrays and SCM_CREDENTIALS are converted field by field; anything
 * else is logged and copied verbatim.  On return msgh->msg_controllen
 * holds the host space actually used.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptor numbers: swap each int individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass through unconverted (best effort). */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1799
/* Convert the host ancillary-data chain in *msgh back into the guest's
 * control buffer described by target_msgh.  Handles SCM_RIGHTS,
 * SO_TIMESTAMP, SCM_CREDENTIALS, IP_TTL/IP_RECVERR and
 * IPV6_HOPLIMIT/IPV6_RECVERR specially; everything else is logged and
 * byte-copied.  Guest-buffer truncation is reported via MSG_CTRUNC.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra space. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2031
/*
 * Swap every field of a netlink message header in place between host
 * and target byte order.  The tswap*() helpers are symmetric, so the
 * same routine serves both conversion directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *hdr)
{
    hdr->nlmsg_type  = tswap16(hdr->nlmsg_type);
    hdr->nlmsg_flags = tswap16(hdr->nlmsg_flags);
    hdr->nlmsg_len   = tswap32(hdr->nlmsg_len);
    hdr->nlmsg_seq   = tswap32(hdr->nlmsg_seq);
    hdr->nlmsg_pid   = tswap32(hdr->nlmsg_pid);
}
2040
/*
 * Walk a buffer of host-byte-order netlink messages, convert each payload
 * with @host_to_target_nlmsg, and byte-swap every header to target order.
 *
 * Statement order is significant: the header is swapped only after its
 * fields have been consumed in host order (nlmsg_len for the walk,
 * nlmsg_type for dispatch).
 *
 * Returns 0 on success, or the first negative error from the callback.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Save the host-order length; the walk below still needs it
         * after tswap_nlmsghdr() has byte-swapped the header. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            /* Malformed or truncated message: stop without touching it. */
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            /* End of a multipart dump: swap this header and stop. */
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The error payload embeds a copy of the offending header,
             * which must be swapped as well. */
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            /* Payload conversion runs while the header is still in
             * host byte order. */
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                /* Swap the header anyway so the part already visited
                 * is consistently in target order. */
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
2085
/*
 * Walk a buffer of target-byte-order netlink messages, byte-swap each
 * header to host order, and convert each payload with
 * @target_to_host_nlmsg.
 *
 * Unlike the host-to-target direction, the header is swapped *before*
 * dispatching, so nlmsg_type and nlmsg_len are read in host order from
 * then on.
 *
 * Returns 0 on success, or the first negative error from the callback.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Bounds-check using the still-target-order length field. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The embedded copy of the offending header also needs
             * swapping. */
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is already in host order here. */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
2122
2123 #ifdef CONFIG_RTNETLINK
/*
 * Walk a chain of netlink attributes (struct nlattr) in host byte order,
 * convert each payload with @host_to_target_nlattr, then swap the
 * attribute header to target order.
 *
 * @context is passed through to the callback (used e.g. to carry
 * IFLA_LINKINFO state between sibling attributes).
 *
 * Returns 0, or the first negative error from the callback.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Keep the host-order length; the header is swapped below but
         * the walk still needs the original value. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            /* Malformed or truncated attribute: stop the walk. */
            break;
        }
        /* Convert the payload while nla_type is still in host order,
         * then swap the header unconditionally so the portion already
         * visited is consistent even when the callback failed. */
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
2150
/*
 * Walk a chain of rtnetlink attributes (struct rtattr) in host byte
 * order, convert each payload with @host_to_target_rtattr, then swap
 * the attribute header to target order.
 *
 * Same structure as host_to_target_for_each_nlattr(), but for the
 * rtattr header layout and without a pass-through context.
 *
 * Returns 0, or the first negative error from the callback.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Keep the host-order length for the walk; the header itself
         * is swapped to target order below. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            /* Malformed or truncated attribute: stop the walk. */
            break;
        }
        /* Payload first (host-order header), then swap the header. */
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
2176
2177 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2178
/*
 * Byte-swap the payload of one IFLA_BR_* bridge attribute in place,
 * dispatching on the attribute type to pick the scalar width.
 * Byte-sized values, raw byte strings and bridge ids need no swapping.
 * Unknown types are logged and left untouched.  @context is unused.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
2256
/*
 * Byte-swap the payload of one IFLA_BRPORT_* (bridge port) attribute in
 * place, dispatching on the attribute type to pick the scalar width.
 * Byte-sized values and bridge ids need no swapping.  Unknown types are
 * logged and left untouched.  @context is unused.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
2317
/*
 * State carried across an IFLA_LINKINFO nest walk: remembers the
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings so that subsequent
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA attributes can be dispatched
 * on the device kind.  The name pointers alias the attribute payload
 * in the message buffer; they are not owned by this struct.
 */
struct linkinfo_context {
    int len;           /* payload length of the KIND attribute */
    char *name;        /* device kind string, e.g. "bridge" */
    int slave_len;     /* payload length of the SLAVE_KIND attribute */
    char *slave_name;  /* slave device kind string */
};
2324
2325 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2326 void *context)
2327 {
2328 struct linkinfo_context *li_context = context;
2329
2330 switch (nlattr->nla_type) {
2331 /* string */
2332 case QEMU_IFLA_INFO_KIND:
2333 li_context->name = NLA_DATA(nlattr);
2334 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2335 break;
2336 case QEMU_IFLA_INFO_SLAVE_KIND:
2337 li_context->slave_name = NLA_DATA(nlattr);
2338 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2339 break;
2340 /* stats */
2341 case QEMU_IFLA_INFO_XSTATS:
2342 /* FIXME: only used by CAN */
2343 break;
2344 /* nested */
2345 case QEMU_IFLA_INFO_DATA:
2346 if (strncmp(li_context->name, "bridge",
2347 li_context->len) == 0) {
2348 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2349 nlattr->nla_len,
2350 NULL,
2351 host_to_target_data_bridge_nlattr);
2352 } else {
2353 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2354 }
2355 break;
2356 case QEMU_IFLA_INFO_SLAVE_DATA:
2357 if (strncmp(li_context->slave_name, "bridge",
2358 li_context->slave_len) == 0) {
2359 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2360 nlattr->nla_len,
2361 NULL,
2362 host_to_target_slave_data_bridge_nlattr);
2363 } else {
2364 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365 li_context->slave_name);
2366 }
2367 break;
2368 default:
2369 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2370 break;
2371 }
2372
2373 return 0;
2374 }
2375
2376 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2377 void *context)
2378 {
2379 uint32_t *u32;
2380 int i;
2381
2382 switch (nlattr->nla_type) {
2383 case QEMU_IFLA_INET_CONF:
2384 u32 = NLA_DATA(nlattr);
2385 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2386 i++) {
2387 u32[i] = tswap32(u32[i]);
2388 }
2389 break;
2390 default:
2391 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2392 }
2393 return 0;
2394 }
2395
/*
 * Convert one AF_INET6 attribute of an IFLA_AF_SPEC nest to target byte
 * order, dispatching on the attribute type to pick the payload layout
 * (scalar, scalar array, or struct ifla_cacheinfo).  Unknown types are
 * logged and left untouched.  @context is unused.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        /* Element count = payload length / element size. */
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2446
2447 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2448 void *context)
2449 {
2450 switch (nlattr->nla_type) {
2451 case AF_INET:
2452 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2453 NULL,
2454 host_to_target_data_inet_nlattr);
2455 case AF_INET6:
2456 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2457 NULL,
2458 host_to_target_data_inet6_nlattr);
2459 default:
2460 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2461 break;
2462 }
2463 return 0;
2464 }
2465
2466 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2467 void *context)
2468 {
2469 uint32_t *u32;
2470
2471 switch (nlattr->nla_type) {
2472 /* uint8_t */
2473 case QEMU_IFLA_XDP_ATTACHED:
2474 break;
2475 /* uint32_t */
2476 case QEMU_IFLA_XDP_PROG_ID:
2477 u32 = NLA_DATA(nlattr);
2478 *u32 = tswap32(*u32);
2479 break;
2480 default:
2481 gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2482 break;
2483 }
2484 return 0;
2485 }
2486
/*
 * Byte-swap one IFLA_* attribute of an RTM_*LINK message in place,
 * dispatching on the attribute type to pick the payload layout.
 * Nested attributes (LINKINFO, AF_SPEC, XDP) recurse into the matching
 * nlattr walker.  Unknown types are logged and left untouched.
 *
 * NOTE(review): the nested cases pass rtattr->rta_len — which includes
 * the rtattr header — as the length of the payload walk; this looks
 * like it over-counts by the header size.  Confirm against the bounds
 * check in host_to_target_for_each_nlattr.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uin8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
    case QEMU_IFLA_CARRIER_UP_COUNT:
    case QEMU_IFLA_CARRIER_DOWN_COUNT:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* li_context carries INFO_KIND across the nested walk. */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    case QEMU_IFLA_XDP:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                                host_to_target_data_xdp_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2624
/*
 * Byte-swap one IFA_* attribute of an RTM_*ADDR message in place.
 * Address payloads and labels are endianness-neutral; scalar fields and
 * the cacheinfo struct are swapped.  Unknown types are logged and left
 * untouched.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    /* NOTE(review): IFA_BROADCAST normally carries an address, which for
     * AF_INET is kept in network byte order like IFA_ADDRESS/IFA_LOCAL —
     * swapping it as a host u32 looks suspicious; confirm against the
     * kernel's if_addr.h attribute definitions. */
    case IFA_BROADCAST:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2658
/*
 * Byte-swap one RTA_* attribute of an RTM_*ROUTE message in place.
 * Address payloads are family-dependent binary data and are left alone;
 * 32-bit scalars are swapped.  Unknown types are logged and left
 * untouched.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2681
/* Convert the IFLA rtattr chain of an RTM_*LINK message to target order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
2688
/* Convert the IFA rtattr chain of an RTM_*ADDR message to target order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
2695
/* Convert the RTA rtattr chain of an RTM_*ROUTE message to target order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
2702
/*
 * Convert the payload of one host rtnetlink message (RTM_*LINK,
 * RTM_*ADDR, RTM_*ROUTE) to target byte order: the fixed leading struct
 * first, then the trailing rtattr chain.  Called from
 * host_to_target_for_each_nlmsg() while the nlmsghdr is still in host
 * order.  Returns -TARGET_EINVAL for unhandled message types.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* Only convert if the message is large enough to hold the
         * fixed struct; shorter messages are passed through as-is. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            /* ifaddrmsg's other fields are single bytes. */
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2750
/* Convert a buffer of host rtnetlink messages to target byte order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
2756
/*
 * Walk a chain of target-byte-order rtattrs, swap each header to host
 * order, then convert the payload with @target_to_host_rtattr.
 *
 * The bounds check uses tswap16() on the raw field because the header
 * has not been swapped yet at that point; once swapped, rta_len is
 * used directly for the walk.
 *
 * Returns 0, or the first negative error from the callback.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            /* Malformed or truncated attribute: stop the walk. */
            break;
        }
        /* Header to host order first, then the payload callback sees
         * rta_type/rta_len in host order. */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2781
2782 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2783 {
2784 switch (rtattr->rta_type) {
2785 default:
2786 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2787 break;
2788 }
2789 return 0;
2790 }
2791
2792 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2793 {
2794 switch (rtattr->rta_type) {
2795 /* binary: depends on family type */
2796 case IFA_LOCAL:
2797 case IFA_ADDRESS:
2798 break;
2799 default:
2800 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2801 break;
2802 }
2803 return 0;
2804 }
2805
2806 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2807 {
2808 uint32_t *u32;
2809 switch (rtattr->rta_type) {
2810 /* binary: depends on family type */
2811 case RTA_DST:
2812 case RTA_SRC:
2813 case RTA_GATEWAY:
2814 break;
2815 /* u32 */
2816 case RTA_PRIORITY:
2817 case RTA_OIF:
2818 u32 = RTA_DATA(rtattr);
2819 *u32 = tswap32(*u32);
2820 break;
2821 default:
2822 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2823 break;
2824 }
2825 return 0;
2826 }
2827
/* Convert the IFLA rtattr chain of an RTM_*LINK message to host order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
2834
/* Convert the IFA rtattr chain of an RTM_*ADDR message to host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
2841
/* Convert the RTA rtattr chain of an RTM_*ROUTE message to host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
2848
/*
 * Convert the payload of one target rtnetlink message to host byte
 * order: the fixed leading struct first, then the trailing rtattr
 * chain.  Called from target_to_host_for_each_nlmsg() after the
 * nlmsghdr has already been swapped to host order.  GET requests with
 * no fixed payload conversion needed (RTM_GETLINK/RTM_GETROUTE) pass
 * straight through.  Returns -TARGET_EOPNOTSUPP for unhandled types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* Only convert if the message is large enough to hold the
         * fixed struct. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            /* ifaddrmsg's other fields are single bytes. */
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2896
/* Convert a buffer of target rtnetlink messages to host byte order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2901 #endif /* CONFIG_RTNETLINK */
2902
2903 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2904 {
2905 switch (nlh->nlmsg_type) {
2906 default:
2907 gemu_log("Unknown host audit message type %d\n",
2908 nlh->nlmsg_type);
2909 return -TARGET_EINVAL;
2910 }
2911 return 0;
2912 }
2913
/* Convert a buffer of host audit netlink messages to target order. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2919
2920 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2921 {
2922 switch (nlh->nlmsg_type) {
2923 case AUDIT_USER:
2924 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2925 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2926 break;
2927 default:
2928 gemu_log("Unknown target audit message type %d\n",
2929 nlh->nlmsg_type);
2930 return -TARGET_EINVAL;
2931 }
2932
2933 return 0;
2934 }
2935
/* Convert a buffer of target audit netlink messages to host order. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2940
2941 /* do_setsockopt() Must return target values and target errnos. */
2942 static abi_long do_setsockopt(int sockfd, int level, int optname,
2943 abi_ulong optval_addr, socklen_t optlen)
2944 {
2945 abi_long ret;
2946 int val;
2947 struct ip_mreqn *ip_mreq;
2948 struct ip_mreq_source *ip_mreq_source;
2949
2950 switch(level) {
2951 case SOL_TCP:
2952 /* TCP options all take an 'int' value. */
2953 if (optlen < sizeof(uint32_t))
2954 return -TARGET_EINVAL;
2955
2956 if (get_user_u32(val, optval_addr))
2957 return -TARGET_EFAULT;
2958 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2959 break;
2960 case SOL_IP:
2961 switch(optname) {
2962 case IP_TOS:
2963 case IP_TTL:
2964 case IP_HDRINCL:
2965 case IP_ROUTER_ALERT:
2966 case IP_RECVOPTS:
2967 case IP_RETOPTS:
2968 case IP_PKTINFO:
2969 case IP_MTU_DISCOVER:
2970 case IP_RECVERR:
2971 case IP_RECVTTL:
2972 case IP_RECVTOS:
2973 #ifdef IP_FREEBIND
2974 case IP_FREEBIND:
2975 #endif
2976 case IP_MULTICAST_TTL:
2977 case IP_MULTICAST_LOOP:
2978 val = 0;
2979 if (optlen >= sizeof(uint32_t)) {
2980 if (get_user_u32(val, optval_addr))
2981 return -TARGET_EFAULT;
2982 } else if (optlen >= 1) {
2983 if (get_user_u8(val, optval_addr))
2984 return -TARGET_EFAULT;
2985 }
2986 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2987 break;
2988 case IP_ADD_MEMBERSHIP:
2989 case IP_DROP_MEMBERSHIP:
2990 if (optlen < sizeof (struct target_ip_mreq) ||
2991 optlen > sizeof (struct target_ip_mreqn))
2992 return -TARGET_EINVAL;
2993
2994 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2995 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2996 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2997 break;
2998
2999 case IP_BLOCK_SOURCE:
3000 case IP_UNBLOCK_SOURCE:
3001 case IP_ADD_SOURCE_MEMBERSHIP:
3002 case IP_DROP_SOURCE_MEMBERSHIP:
3003 if (optlen != sizeof (struct target_ip_mreq_source))
3004 return -TARGET_EINVAL;
3005
3006 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3007 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3008 unlock_user (ip_mreq_source, optval_addr, 0);
3009 break;
3010
3011 default:
3012 goto unimplemented;
3013 }
3014 break;
3015 case SOL_IPV6:
3016 switch (optname) {
3017 case IPV6_MTU_DISCOVER:
3018 case IPV6_MTU:
3019 case IPV6_V6ONLY:
3020 case IPV6_RECVPKTINFO:
3021 case IPV6_UNICAST_HOPS:
3022 case IPV6_MULTICAST_HOPS:
3023 case IPV6_MULTICAST_LOOP:
3024 case IPV6_RECVERR:
3025 case IPV6_RECVHOPLIMIT:
3026 case IPV6_2292HOPLIMIT:
3027 case IPV6_CHECKSUM:
3028 val = 0;
3029 if (optlen < sizeof(uint32_t)) {
3030 return -TARGET_EINVAL;
3031 }
3032 if (get_user_u32(val, optval_addr)) {
3033 return -TARGET_EFAULT;
3034 }
3035 ret = get_errno(setsockopt(sockfd, level, optname,
3036 &val, sizeof(val)));
3037 break;
3038 case IPV6_PKTINFO:
3039 {
3040 struct in6_pktinfo pki;
3041
3042 if (optlen < sizeof(pki)) {
3043 return -TARGET_EINVAL;
3044 }
3045
3046 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3047 return -TARGET_EFAULT;
3048 }
3049
3050 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3051
3052 ret = get_errno(setsockopt(sockfd, level, optname,
3053 &pki, sizeof(pki)));
3054 break;
3055 }
3056 default:
3057 goto unimplemented;
3058 }
3059 break;
3060 case SOL_ICMPV6:
3061 switch (optname) {
3062 case ICMPV6_FILTER:
3063 {
3064 struct icmp6_filter icmp6f;
3065
3066 if (optlen > sizeof(icmp6f)) {
3067 optlen = sizeof(icmp6f);
3068 }
3069
3070 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3071 return -TARGET_EFAULT;
3072 }
3073
3074 for (val = 0; val < 8; val++) {
3075 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3076 }
3077
3078 ret = get_errno(setsockopt(sockfd, level, optname,
3079 &icmp6f, optlen));
3080 break;
3081 }
3082 default:
3083 goto unimplemented;
3084 }
3085 break;
3086 case SOL_RAW:
3087 switch (optname) {
3088 case ICMP_FILTER:
3089 case IPV6_CHECKSUM:
3090 /* those take an u32 value */
3091 if (optlen < sizeof(uint32_t)) {
3092 return -TARGET_EINVAL;
3093 }
3094
3095 if (get_user_u32(val, optval_addr)) {
3096 return -TARGET_EFAULT;
3097 }
3098 ret = get_errno(setsockopt(sockfd, level, optname,
3099 &val, sizeof(val)));
3100 break;
3101
3102 default:
3103 goto unimplemented;
3104 }
3105 break;
3106 case TARGET_SOL_SOCKET:
3107 switch (optname) {
3108 case TARGET_SO_RCVTIMEO:
3109 {
3110 struct timeval tv;
3111
3112 optname = SO_RCVTIMEO;
3113
3114 set_timeout:
3115 if (optlen != sizeof(struct target_timeval)) {
3116 return -TARGET_EINVAL;
3117 }
3118
3119 if (copy_from_user_timeval(&tv, optval_addr)) {
3120 return -TARGET_EFAULT;
3121 }
3122
3123 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3124 &tv, sizeof(tv)));
3125 return ret;
3126 }
3127 case TARGET_SO_SNDTIMEO:
3128 optname = SO_SNDTIMEO;
3129 goto set_timeout;
3130 case TARGET_SO_ATTACH_FILTER:
3131 {
3132 struct target_sock_fprog *tfprog;
3133 struct target_sock_filter *tfilter;
3134 struct sock_fprog fprog;
3135 struct sock_filter *filter;
3136 int i;
3137
3138 if (optlen != sizeof(*tfprog)) {
3139 return -TARGET_EINVAL;
3140 }
3141 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3142 return -TARGET_EFAULT;
3143 }
3144 if (!lock_user_struct(VERIFY_READ, tfilter,
3145 tswapal(tfprog->filter), 0)) {
3146 unlock_user_struct(tfprog, optval_addr, 1);
3147 return -TARGET_EFAULT;
3148 }
3149
3150 fprog.len = tswap16(tfprog->len);
3151 filter = g_try_new(struct sock_filter, fprog.len);
3152 if (filter == NULL) {
3153 unlock_user_struct(tfilter, tfprog->filter, 1);
3154 unlock_user_struct(tfprog, optval_addr, 1);
3155 return -TARGET_ENOMEM;
3156 }
3157 for (i = 0; i < fprog.len; i++) {
3158 filter[i].code = tswap16(tfilter[i].code);
3159 filter[i].jt = tfilter[i].jt;
3160 filter[i].jf = tfilter[i].jf;
3161 filter[i].k = tswap32(tfilter[i].k);
3162 }
3163 fprog.filter = filter;
3164
3165 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3166 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3167 g_free(filter);
3168
3169 unlock_user_struct(tfilter, tfprog->filter, 1);
3170 unlock_user_struct(tfprog, optval_addr, 1);
3171 return ret;
3172 }
3173 case TARGET_SO_BINDTODEVICE:
3174 {
3175 char *dev_ifname, *addr_ifname;
3176
3177 if (optlen > IFNAMSIZ - 1) {
3178 optlen = IFNAMSIZ - 1;
3179 }
3180 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3181 if (!dev_ifname) {
3182 return -TARGET_EFAULT;
3183 }
3184 optname = SO_BINDTODEVICE;
3185 addr_ifname = alloca(IFNAMSIZ);
3186 memcpy(addr_ifname, dev_ifname, optlen);
3187 addr_ifname[optlen] = 0;
3188 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3189 addr_ifname, optlen));
3190 unlock_user (dev_ifname, optval_addr, 0);
3191 return ret;
3192 }
3193 /* Options with 'int' argument. */
3194 case TARGET_SO_DEBUG:
3195 optname = SO_DEBUG;
3196 break;
3197 case TARGET_SO_REUSEADDR:
3198 optname = SO_REUSEADDR;
3199 break;
3200 case TARGET_SO_TYPE:
3201 optname = SO_TYPE;
3202 break;
3203 case TARGET_SO_ERROR:
3204 optname = SO_ERROR;
3205 break;
3206 case TARGET_SO_DONTROUTE:
3207 optname = SO_DONTROUTE;
3208 break;
3209 case TARGET_SO_BROADCAST:
3210 optname = SO_BROADCAST;
3211 break;
3212 case TARGET_SO_SNDBUF:
3213 optname = SO_SNDBUF;
3214 break;
3215 case TARGET_SO_SNDBUFFORCE:
3216 optname = SO_SNDBUFFORCE;
3217 break;
3218 case TARGET_SO_RCVBUF:
3219 optname = SO_RCVBUF;
3220 break;
3221 case TARGET_SO_RCVBUFFORCE:
3222 optname = SO_RCVBUFFORCE;
3223 break;
3224 case TARGET_SO_KEEPALIVE:
3225 optname = SO_KEEPALIVE;
3226 break;
3227 case TARGET_SO_OOBINLINE:
3228 optname = SO_OOBINLINE;
3229 break;
3230 case TARGET_SO_NO_CHECK:
3231 optname = SO_NO_CHECK;
3232 break;
3233 case TARGET_SO_PRIORITY:
3234 optname = SO_PRIORITY;
3235 break;
3236 #ifdef SO_BSDCOMPAT
3237 case TARGET_SO_BSDCOMPAT:
3238 optname = SO_BSDCOMPAT;
3239 break;
3240 #endif
3241 case TARGET_SO_PASSCRED:
3242 optname = SO_PASSCRED;
3243 break;
3244 case TARGET_SO_PASSSEC:
3245 optname = SO_PASSSEC;
3246 break;
3247 case TARGET_SO_TIMESTAMP:
3248 optname = SO_TIMESTAMP;
3249 break;
3250 case TARGET_SO_RCVLOWAT:
3251 optname = SO_RCVLOWAT;
3252 break;
3253 default:
3254 goto unimplemented;
3255 }
3256 if (optlen < sizeof(uint32_t))
3257 return -TARGET_EINVAL;
3258
3259 if (get_user_u32(val, optval_addr))
3260 return -TARGET_EFAULT;
3261 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3262 break;
3263 default:
3264 unimplemented:
3265 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3266 ret = -TARGET_ENOPROTOOPT;
3267 }
3268 return ret;
3269 }
3270
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2): converts the target-level/optname to host
 * values, performs the host call, then converts the result (value and
 * length) back into guest memory at optval_addr/optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            /* Guest passes a pointer to the buffer length; fetch it first. */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Truncate to what the host actually filled in. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            /* Copy field-by-field so target byte order is applied. */
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): 'lv = sizeof(lv)' looks like it was meant to be
         * sizeof(val); both are 4 bytes on Linux so behavior matches —
         * confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* Socket type constants differ between targets; translate. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back either the full int or a single byte, mirroring
         * the kernel's handling of short option buffers. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small buffers get a 1-byte result when the value fits,
             * matching kernel behavior for these byte-sized IP options. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
3458
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* The shift is done in two half-width steps because a single shift
     * by TARGET_LONG_BITS would be undefined behavior when the target
     * long is 64-bit (shift count == operand width).  On 64-bit targets
     * the double shift yields 0, which is correct: the whole offset is
     * already in 'tlow'. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    /* Same double-shift trick on the host side: on a 64-bit host the
     * high word is 0 and 'hlow' carries the full offset. */
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
3475
/* Lock a guest iovec array (count entries at target_addr) into host
 * memory and return a host struct iovec array, or NULL with errno set
 * on failure (callers convert with host_to_target_errno).  count == 0
 * returns NULL with errno 0 — a valid empty result, not an error.
 * 'type'/'copy' are forwarded to lock_user for each buffer; the result
 * must be released with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp total to max_len, mimicking the kernel's cap on the
             * combined iovec length. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock only the entries that were successfully locked
     * (those with a positive guest length before entry i). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3563
3564 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3565 abi_ulong count, int copy)
3566 {
3567 struct target_iovec *target_vec;
3568 int i;
3569
3570 target_vec = lock_user(VERIFY_READ, target_addr,
3571 count * sizeof(struct target_iovec), 1);
3572 if (target_vec) {
3573 for (i = 0; i < count; i++) {
3574 abi_ulong base = tswapal(target_vec[i].iov_base);
3575 abi_long len = tswapal(target_vec[i].iov_len);
3576 if (len < 0) {
3577 break;
3578 }
3579 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3580 }
3581 unlock_user(target_vec, target_addr, 0);
3582 }
3583
3584 g_free(vec);
3585 }
3586
3587 static inline int target_to_host_sock_type(int *type)
3588 {
3589 int host_type = 0;
3590 int target_type = *type;
3591
3592 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3593 case TARGET_SOCK_DGRAM:
3594 host_type = SOCK_DGRAM;
3595 break;
3596 case TARGET_SOCK_STREAM:
3597 host_type = SOCK_STREAM;
3598 break;
3599 default:
3600 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3601 break;
3602 }
3603 if (target_type & TARGET_SOCK_CLOEXEC) {
3604 #if defined(SOCK_CLOEXEC)
3605 host_type |= SOCK_CLOEXEC;
3606 #else
3607 return -TARGET_EINVAL;
3608 #endif
3609 }
3610 if (target_type & TARGET_SOCK_NONBLOCK) {
3611 #if defined(SOCK_NONBLOCK)
3612 host_type |= SOCK_NONBLOCK;
3613 #elif !defined(O_NONBLOCK)
3614 return -TARGET_EINVAL;
3615 #endif
3616 }
3617 *type = host_type;
3618 return 0;
3619 }
3620
/* Try to emulate socket type flags after socket creation.
 *
 * Hosts lacking SOCK_NONBLOCK cannot pass the flag to socket(), so set
 * O_NONBLOCK here via fcntl instead.  Returns the fd unchanged on
 * success; on failure the freshly created fd is closed and
 * -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Fix: check F_GETFL too — a failure would otherwise OR the -1
         * error value into the F_SETFL argument. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3635
3636 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3637 abi_ulong target_addr,
3638 socklen_t len)
3639 {
3640 struct sockaddr *addr = host_addr;
3641 struct target_sockaddr *target_saddr;
3642
3643 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3644 if (!target_saddr) {
3645 return -TARGET_EFAULT;
3646 }
3647
3648 memcpy(addr, target_saddr, len);
3649 addr->sa_family = tswap16(target_saddr->sa_family);
3650 /* spkt_protocol is big-endian */
3651
3652 unlock_user(target_saddr, target_addr, 0);
3653 return 0;
3654 }
3655
/* fd translator installed on SOCK_PACKET sockets (see do_socket):
 * fixes sockaddr byte order on the target-to-host path. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3659
3660 #ifdef CONFIG_RTNETLINK
3661 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3662 {
3663 abi_long ret;
3664
3665 ret = target_to_host_nlmsg_route(buf, len);
3666 if (ret < 0) {
3667 return ret;
3668 }
3669
3670 return len;
3671 }
3672
3673 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3674 {
3675 abi_long ret;
3676
3677 ret = host_to_target_nlmsg_route(buf, len);
3678 if (ret < 0) {
3679 return ret;
3680 }
3681
3682 return len;
3683 }
3684
/* fd translator for NETLINK_ROUTE sockets: converts route netlink
 * messages in both directions (see do_socket). */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3689 #endif /* CONFIG_RTNETLINK */
3690
3691 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3692 {
3693 abi_long ret;
3694
3695 ret = target_to_host_nlmsg_audit(buf, len);
3696 if (ret < 0) {
3697 return ret;
3698 }
3699
3700 return len;
3701 }
3702
3703 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3704 {
3705 abi_long ret;
3706
3707 ret = host_to_target_nlmsg_audit(buf, len);
3708 if (ret < 0) {
3709 return ret;
3710 }
3711
3712 return len;
3713 }
3714
/* fd translator for NETLINK_AUDIT sockets: converts audit netlink
 * messages in both directions (see do_socket). */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
3719
3720 /* do_socket() Must return target values and target errnos. */
3721 static abi_long do_socket(int domain, int type, int protocol)
3722 {
3723 int target_type = type;
3724 int ret;
3725
3726 ret = target_to_host_sock_type(&type);
3727 if (ret) {
3728 return ret;
3729 }
3730
3731 if (domain == PF_NETLINK && !(
3732 #ifdef CONFIG_RTNETLINK
3733 protocol == NETLINK_ROUTE ||
3734 #endif
3735 protocol == NETLINK_KOBJECT_UEVENT ||
3736 protocol == NETLINK_AUDIT)) {
3737 return -EPFNOSUPPORT;
3738 }
3739
3740 if (domain == AF_PACKET ||
3741 (domain == AF_INET && type == SOCK_PACKET)) {
3742 protocol = tswap16(protocol);
3743 }
3744
3745 ret = get_errno(socket(domain, type, protocol));
3746 if (ret >= 0) {
3747 ret = sock_flags_fixup(ret, target_type);
3748 if (type == SOCK_PACKET) {
3749 /* Manage an obsolete case :
3750 * if socket type is SOCK_PACKET, bind by name
3751 */
3752 fd_trans_register(ret, &target_packet_trans);
3753 } else if (domain == PF_NETLINK) {
3754 switch (protocol) {
3755 #ifdef CONFIG_RTNETLINK
3756 case NETLINK_ROUTE:
3757 fd_trans_register(ret, &target_netlink_route_trans);
3758 break;
3759 #endif
3760 case NETLINK_KOBJECT_UEVENT:
3761 /* nothing to do: messages are strings */
3762 break;
3763 case NETLINK_AUDIT:
3764 fd_trans_register(ret, &target_netlink_audit_trans);
3765 break;
3766 default:
3767 g_assert_not_reached();
3768 }
3769 }
3770 }
3771 return ret;
3772 }
3773
3774 /* do_bind() Must return target values and target errnos. */
3775 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3776 socklen_t addrlen)
3777 {
3778 void *addr;
3779 abi_long ret;
3780
3781 if ((int)addrlen < 0) {
3782 return -TARGET_EINVAL;
3783 }
3784
3785 addr = alloca(addrlen+1);
3786
3787 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3788 if (ret)
3789 return ret;
3790
3791 return get_errno(bind(sockfd, addr, addrlen));
3792 }
3793
3794 /* do_connect() Must return target values and target errnos. */
3795 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3796 socklen_t addrlen)
3797 {
3798 void *addr;
3799 abi_long ret;
3800
3801 if ((int)addrlen < 0) {
3802 return -TARGET_EINVAL;
3803 }
3804
3805 addr = alloca(addrlen+1);
3806
3807 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3808 if (ret)
3809 return ret;
3810
3811 return get_errno(safe_connect(sockfd, addr, addrlen));
3812 }
3813
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Common worker for sendmsg/recvmsg (and the *mmsg variants): 'msgp'
 * is an already-locked guest msghdr, 'send' selects the direction.
 * Returns the byte count transferred, or a negative target errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): the factor of 2 over-allocates for host cmsg
     * layouts larger than the target's; the alloca size is
     * guest-controlled — confirm acceptable bounds. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Data translator: only the first iov entry is converted
             * and sent (a private copy, so guest memory is untouched). */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                /* Translate received data in place (first iov entry). */
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 marks the deliberately-bad name set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* Report the byte count from recvmsg, not the cmsg result. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3919
3920 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3921 int flags, int send)
3922 {
3923 abi_long ret;
3924 struct target_msghdr *msgp;
3925
3926 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3927 msgp,
3928 target_msg,
3929 send ? 1 : 0)) {
3930 return -TARGET_EFAULT;
3931 }
3932 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3933 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3934 return ret;
3935 }
3936
3937 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3938 * so it might not have this *mmsg-specific flag either.
3939 */
3940 #ifndef MSG_WAITFORONE
3941 #define MSG_WAITFORONE 0x10000
3942 #endif
3943
/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked for
 * each mmsghdr entry.  Must return target values and target errnos:
 * the number of datagrams processed if any succeeded, else the error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently cap the vector length, as the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed (i of them). */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3983
/* do_accept4() Must return target values and target errnos.
 *
 * Emulates accept4(2) (and plain accept via flags == 0).  A NULL guest
 * address means the caller doesn't want the peer address, so addrlen is
 * not even read — matching kernel behavior.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Translate SOCK_CLOEXEC/SOCK_NONBLOCK-style flags to host values. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): EINVAL here (not EFAULT) is deliberate per the
     * comment above; do_getpeername/do_getsockname return EFAULT for
     * the same check. */
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
4020
4021 /* do_getpeername() Must return target values and target errnos. */
4022 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4023 abi_ulong target_addrlen_addr)
4024 {
4025 socklen_t addrlen;
4026 void *addr;
4027 abi_long ret;
4028
4029 if (get_user_u32(addrlen, target_addrlen_addr))
4030 return -TARGET_EFAULT;
4031
4032 if ((int)addrlen < 0) {
4033 return -TARGET_EINVAL;
4034 }
4035
4036 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4037 return -TARGET_EFAULT;
4038
4039 addr = alloca(addrlen);
4040
4041 ret = get_errno(getpeername(fd, addr, &addrlen));
4042 if (!is_error(ret)) {
4043 host_to_target_sockaddr(target_addr, addr, addrlen);
4044 if (put_user_u32(addrlen, target_addrlen_addr))
4045 ret = -TARGET_EFAULT;
4046 }
4047 return ret;
4048 }
4049
4050 /* do_getsockname() Must return target values and target errnos. */
4051 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4052 abi_ulong target_addrlen_addr)
4053 {
4054 socklen_t addrlen;
4055 void *addr;
4056 abi_long ret;
4057
4058 if (get_user_u32(addrlen, target_addrlen_addr))
4059 return -TARGET_EFAULT;
4060
4061 if ((int)addrlen < 0) {
4062 return -TARGET_EINVAL;
4063 }
4064
4065 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4066 return -TARGET_EFAULT;
4067
4068 addr = alloca(addrlen);
4069
4070 ret = get_errno(getsockname(fd, addr, &addrlen));
4071 if (!is_error(ret)) {
4072 host_to_target_sockaddr(target_addr, addr, addrlen);
4073 if (put_user_u32(addrlen, target_addrlen_addr))
4074 ret = -TARGET_EFAULT;
4075 }
4076 return ret;
4077 }
4078
4079 /* do_socketpair() Must return target values and target errnos. */
4080 static abi_long do_socketpair(int domain, int type, int protocol,
4081 abi_ulong target_tab_addr)
4082 {
4083 int tab[2];
4084 abi_long ret;
4085
4086 target_to_host_sock_type(&type);
4087
4088 ret = get_errno(socketpair(domain, type, protocol, tab));
4089 if (!is_error(ret)) {
4090 if (put_user_s32(tab[0], target_tab_addr)
4091 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4092 ret = -TARGET_EFAULT;
4093 }
4094 return ret;
4095 }
4096
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates both sendto(2) (target_addr != 0) and send(2)
 * (target_addr == 0).  If the fd has a data translator registered, the
 * message is translated in a private copy before sending.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate in a heap copy so guest memory is left untouched;
         * copy_msg keeps the locked pointer for the final unlock. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 spare byte for target_to_host_sockaddr (presumably to
         * NUL-terminate AF_UNIX paths — confirm in that helper). */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Restore the locked pointer if a translation copy was made. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
4140
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates both recvfrom(2) (target_addr != 0) and recv(2)
 * (target_addr == 0).  Received data is translated in place if the fd
 * has a host-to-target data translator registered.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translator may adjust the returned length. */
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received data back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        /* Error: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
4188
4189 #ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Emulates the multiplexed socketcall(2) syscall used on some targets:
 * reads nargs[num] abi_long arguments from guest memory at vptr, then
 * dispatches to the matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
4280 #endif
4281
/* Maximum number of guest shmat() attachments tracked at once. */
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shared-memory attachments so that do_shmdt()
 * can recover the size of the region being detached and clear its
 * page flags.  Slots are recycled via the in_use flag. */
static struct shm_region {
    abi_ulong start;   /* guest address the segment is attached at */
    abi_ulong size;    /* size in bytes of the attached segment */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
4289
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest layout of the 64-bit semid_ds structure, used when the target
 * does not supply its own TARGET_SEMID64_DS.  The __unused padding
 * words mirror the asm-generic kernel layout on 32-bit ABIs. */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;  /* ownership and permissions */
    abi_ulong sem_otime;              /* last semop() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;              /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
4308
4309 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4310 abi_ulong target_addr)
4311 {
4312 struct target_ipc_perm *target_ip;
4313 struct target_semid64_ds *target_sd;
4314
4315 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4316 return -TARGET_EFAULT;
4317 target_ip = &(target_sd->sem_perm);
4318 host_ip->__key = tswap32(target_ip->__key);
4319 host_ip->uid = tswap32(target_ip->uid);
4320 host_ip->gid = tswap32(target_ip->gid);
4321 host_ip->cuid = tswap32(target_ip->cuid);
4322 host_ip->cgid = tswap32(target_ip->cgid);
4323 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4324 host_ip->mode = tswap32(target_ip->mode);
4325 #else
4326 host_ip->mode = tswap16(target_ip->mode);
4327 #endif
4328 #if defined(TARGET_PPC)
4329 host_ip->__seq = tswap32(target_ip->__seq);
4330 #else
4331 host_ip->__seq = tswap16(target_ip->__seq);
4332 #endif
4333 unlock_user_struct(target_sd, target_addr, 0);
4334 return 0;
4335 }
4336
4337 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4338 struct ipc_perm *host_ip)
4339 {
4340 struct target_ipc_perm *target_ip;
4341 struct target_semid64_ds *target_sd;
4342
4343 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4344 return -TARGET_EFAULT;
4345 target_ip = &(target_sd->sem_perm);
4346 target_ip->__key = tswap32(host_ip->__key);
4347 target_ip->uid = tswap32(host_ip->uid);
4348 target_ip->gid = tswap32(host_ip->gid);
4349 target_ip->cuid = tswap32(host_ip->cuid);
4350 target_ip->cgid = tswap32(host_ip->cgid);
4351 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4352 target_ip->mode = tswap32(host_ip->mode);
4353 #else
4354 target_ip->mode = tswap16(host_ip->mode);
4355 #endif
4356 #if defined(TARGET_PPC)
4357 target_ip->__seq = tswap32(host_ip->__seq);
4358 #else
4359 target_ip->__seq = tswap16(host_ip->__seq);
4360 #endif
4361 unlock_user_struct(target_sd, target_addr, 1);
4362 return 0;
4363 }
4364
4365 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4366 abi_ulong target_addr)
4367 {
4368 struct target_semid64_ds *target_sd;
4369
4370 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4371 return -TARGET_EFAULT;
4372 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4373 return -TARGET_EFAULT;
4374 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4375 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4376 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4377 unlock_user_struct(target_sd, target_addr, 0);
4378 return 0;
4379 }
4380
4381 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4382 struct semid_ds *host_sd)
4383 {
4384 struct target_semid64_ds *target_sd;
4385
4386 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4387 return -TARGET_EFAULT;
4388 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4389 return -TARGET_EFAULT;
4390 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4391 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4392 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4393 unlock_user_struct(target_sd, target_addr, 1);
4394 return 0;
4395 }
4396
/* Guest layout of struct seminfo as filled in by
 * semctl(IPC_INFO/SEM_INFO); all fields are plain ints on every
 * Linux ABI, so no per-field width fixups are needed. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4409
4410 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4411 struct seminfo *host_seminfo)
4412 {
4413 struct target_seminfo *target_seminfo;
4414 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4415 return -TARGET_EFAULT;
4416 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4417 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4418 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4419 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4420 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4421 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4422 __put_user(host_seminfo->semume, &target_seminfo->semume);
4423 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4424 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4425 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4426 unlock_user_struct(target_seminfo, target_addr, 1);
4427 return 0;
4428 }
4429
/* Host-side semun, passed by value to semctl(); glibc does not
 * declare this union, so it is defined here. */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side semun: every pointer member is a guest address. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
4443
4444 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4445 abi_ulong target_addr)
4446 {
4447 int nsems;
4448 unsigned short *array;
4449 union semun semun;
4450 struct semid_ds semid_ds;
4451 int i, ret;
4452
4453 semun.buf = &semid_ds;
4454
4455 ret = semctl(semid, 0, IPC_STAT, semun);
4456 if (ret == -1)
4457 return get_errno(ret);
4458
4459 nsems = semid_ds.sem_nsems;
4460
4461 *host_array = g_try_new(unsigned short, nsems);
4462 if (!*host_array) {
4463 return -TARGET_ENOMEM;
4464 }
4465 array = lock_user(VERIFY_READ, target_addr,
4466 nsems*sizeof(unsigned short), 1);
4467 if (!array) {
4468 g_free(*host_array);
4469 return -TARGET_EFAULT;
4470 }
4471
4472 for(i=0; i<nsems; i++) {
4473 __get_user((*host_array)[i], &array[i]);
4474 }
4475 unlock_user(array, target_addr, 0);
4476
4477 return 0;
4478 }
4479
4480 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4481 unsigned short **host_array)
4482 {
4483 int nsems;
4484 unsigned short *array;
4485 union semun semun;
4486 struct semid_ds semid_ds;
4487 int i, ret;
4488
4489 semun.buf = &semid_ds;
4490
4491 ret = semctl(semid, 0, IPC_STAT, semun);
4492 if (ret == -1)
4493 return get_errno(ret);
4494
4495 nsems = semid_ds.sem_nsems;
4496
4497 array = lock_user(VERIFY_WRITE, target_addr,
4498 nsems*sizeof(unsigned short), 0);
4499 if (!array)
4500 return -TARGET_EFAULT;
4501
4502 for(i=0; i<nsems; i++) {
4503 __put_user((*host_array)[i], &array[i]);
4504 }
4505 g_free(*host_array);
4506 unlock_user(array, target_addr, 1);
4507
4508 return 0;
4509 }
4510
/* Emulate semctl(2).  target_arg is the guest's semun union passed by
 * value; which member is live depends on cmd.  Returns the host
 * semctl result or a target errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;  /* unhandled cmds fall through to this */
    abi_long err;
    cmd &= 0xff;  /* mask off ABI flag bits (e.g. IPC_64) in the high bits */

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Bounce the whole semaphore value array in, and back out again
         * afterwards (host_to_target_semarray also frees it). */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* These exchange a semid_ds through target_su.buf. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        /* Host fills in a seminfo; copy it out to the guest. */
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no semun payload at all. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
4580
/* Guest layout of struct sembuf for semop(); all three fields are
 * 16-bit on every Linux ABI, so the layout matches the host's. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* operation (add/subtract/wait-for-zero) */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
4586
4587 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4588 abi_ulong target_addr,
4589 unsigned nsops)
4590 {
4591 struct target_sembuf *target_sembuf;
4592 int i;
4593
4594 target_sembuf = lock_user(VERIFY_READ, target_addr,
4595 nsops*sizeof(struct target_sembuf), 1);
4596 if (!target_sembuf)
4597 return -TARGET_EFAULT;
4598
4599 for(i=0; i<nsops; i++) {
4600 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4601 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4602 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4603 }
4604
4605 unlock_user(target_sembuf, target_addr, 0);
4606
4607 return 0;
4608 }
4609
4610 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4611 {
4612 struct sembuf sops[nsops];
4613
4614 if (target_to_host_sembuf(sops, ptr, nsops))
4615 return -TARGET_EFAULT;
4616
4617 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4618 }
4619
/* Guest layout of the 64-bit msqid_ds structure (asm-generic style);
 * the __unused padding words appear only on 32-bit ABIs. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;              /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;           /* bytes currently on queue */
    abi_ulong msg_qnum;               /* messages currently on queue */
    abi_ulong msg_qbytes;             /* max bytes allowed on queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd() */
    abi_ulong msg_lrpid;              /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4643
4644 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4645 abi_ulong target_addr)
4646 {
4647 struct target_msqid_ds *target_md;
4648
4649 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4650 return -TARGET_EFAULT;
4651 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4652 return -TARGET_EFAULT;
4653 host_md->msg_stime = tswapal(target_md->msg_stime);
4654 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4655 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4656 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4657 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4658 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4659 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4660 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4661 unlock_user_struct(target_md, target_addr, 0);
4662 return 0;
4663 }
4664
4665 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4666 struct msqid_ds *host_md)
4667 {
4668 struct target_msqid_ds *target_md;
4669
4670 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4671 return -TARGET_EFAULT;
4672 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4673 return -TARGET_EFAULT;
4674 target_md->msg_stime = tswapal(host_md->msg_stime);
4675 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4676 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4677 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4678 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4679 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4680 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4681 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4682 unlock_user_struct(target_md, target_addr, 1);
4683 return 0;
4684 }
4685
/* Guest layout of struct msginfo, filled by msgctl(IPC_INFO/MSG_INFO);
 * matches the kernel's layout of int fields plus a short msgseg. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4696
4697 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4698 struct msginfo *host_msginfo)
4699 {
4700 struct target_msginfo *target_msginfo;
4701 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4702 return -TARGET_EFAULT;
4703 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4704 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4705 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4706 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4707 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4708 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4709 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4710 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4711 unlock_user_struct(target_msginfo, target_addr, 1);
4712 return 0;
4713 }
4714
4715 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4716 {
4717 struct msqid_ds dsarg;
4718 struct msginfo msginfo;
4719 abi_long ret = -TARGET_EINVAL;
4720
4721 cmd &= 0xff;
4722
4723 switch (cmd) {
4724 case IPC_STAT:
4725 case IPC_SET:
4726 case MSG_STAT:
4727 if (target_to_host_msqid_ds(&dsarg,ptr))
4728 return -TARGET_EFAULT;
4729 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4730 if (host_to_target_msqid_ds(ptr,&dsarg))
4731 return -TARGET_EFAULT;
4732 break;
4733 case IPC_RMID:
4734 ret = get_errno(msgctl(msgid, cmd, NULL));
4735 break;
4736 case IPC_INFO:
4737 case MSG_INFO:
4738 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4739 if (host_to_target_msginfo(ptr, &msginfo))
4740 return -TARGET_EFAULT;
4741 break;
4742 }
4743
4744 return ret;
4745 }
4746
/* Guest view of struct msgbuf.  mtext is declared [1] but is really a
 * variable-length payload immediately following mtype in guest memory. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
4751
4752 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4753 ssize_t msgsz, int msgflg)
4754 {
4755 struct target_msgbuf *target_mb;
4756 struct msgbuf *host_mb;
4757 abi_long ret = 0;
4758
4759 if (msgsz < 0) {
4760 return -TARGET_EINVAL;
4761 }
4762
4763 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4764 return -TARGET_EFAULT;
4765 host_mb = g_try_malloc(msgsz + sizeof(long));
4766 if (!host_mb) {
4767 unlock_user_struct(target_mb, msgp, 0);
4768 return -TARGET_ENOMEM;
4769 }
4770 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4771 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4772 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4773 g_free(host_mb);
4774 unlock_user_struct(target_mb, msgp, 0);
4775
4776 return ret;
4777 }
4778
/* Emulate msgrcv(2): receive into a host bounce buffer, then copy the
 * payload and byte-swapped mtype back to the guest msgbuf at msgp.
 * Returns the number of payload bytes received or a target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Bounce buffer: host-sized mtype followed by the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Verify the guest mtext area is writable for the bytes we
         * received; the copy itself goes through the struct mapping.
         * NOTE(review): this relies on a linear guest mapping, like
         * the rest of the ipc emulation in this file. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* Convert the received mtype back to guest byte order. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4821
4822 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4823 abi_ulong target_addr)
4824 {
4825 struct target_shmid_ds *target_sd;
4826
4827 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4828 return -TARGET_EFAULT;
4829 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4830 return -TARGET_EFAULT;
4831 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4832 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4833 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4834 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4835 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4836 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4837 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4838 unlock_user_struct(target_sd, target_addr, 0);
4839 return 0;
4840 }
4841
4842 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4843 struct shmid_ds *host_sd)
4844 {
4845 struct target_shmid_ds *target_sd;
4846
4847 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4848 return -TARGET_EFAULT;
4849 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4850 return -TARGET_EFAULT;
4851 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4852 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4853 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4854 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4855 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4856 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4857 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4858 unlock_user_struct(target_sd, target_addr, 1);
4859 return 0;
4860 }
4861
/* Guest layout of struct shminfo, filled by shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4869
4870 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4871 struct shminfo *host_shminfo)
4872 {
4873 struct target_shminfo *target_shminfo;
4874 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4875 return -TARGET_EFAULT;
4876 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4877 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4878 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4879 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4880 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4881 unlock_user_struct(target_shminfo, target_addr, 1);
4882 return 0;
4883 }
4884
/* Guest layout of struct shm_info, filled by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4893
4894 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4895 struct shm_info *host_shm_info)
4896 {
4897 struct target_shm_info *target_shm_info;
4898 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4899 return -TARGET_EFAULT;
4900 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4901 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4902 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4903 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4904 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4905 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4906 unlock_user_struct(target_shm_info, target_addr, 1);
4907 return 0;
4908 }
4909
4910 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4911 {
4912 struct shmid_ds dsarg;
4913 struct shminfo shminfo;
4914 struct shm_info shm_info;
4915 abi_long ret = -TARGET_EINVAL;
4916
4917 cmd &= 0xff;
4918
4919 switch(cmd) {
4920 case IPC_STAT:
4921 case IPC_SET:
4922 case SHM_STAT:
4923 if (target_to_host_shmid_ds(&dsarg, buf))
4924 return -TARGET_EFAULT;
4925 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4926 if (host_to_target_shmid_ds(buf, &dsarg))
4927 return -TARGET_EFAULT;
4928 break;
4929 case IPC_INFO:
4930 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4931 if (host_to_target_shminfo(buf, &shminfo))
4932 return -TARGET_EFAULT;
4933 break;
4934 case SHM_INFO:
4935 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4936 if (host_to_target_shm_info(buf, &shm_info))
4937 return -TARGET_EFAULT;
4938 break;
4939 case IPC_RMID:
4940 case SHM_LOCK:
4941 case SHM_UNLOCK:
4942 ret = get_errno(shmctl(shmid, cmd, NULL));
4943 break;
4944 }
4945
4946 return ret;
4947 }
4948
4949 #ifndef TARGET_FORCE_SHMLBA
4950 /* For most architectures, SHMLBA is the same as the page size;
4951 * some architectures have larger values, in which case they should
4952 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4953 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4954 * and defining its own value for SHMLBA.
4955 *
4956 * The kernel also permits SHMLBA to be set by the architecture to a
4957 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4958 * this means that addresses are rounded to the large size if
4959 * SHM_RND is set but addresses not aligned to that size are not rejected
4960 * as long as they are at least page-aligned. Since the only architecture
4961 * which uses this is ia64 this code doesn't provide for that oddity.
4962 */
/* Default SHMLBA: one target page (see the comment block above). */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
4967 #endif
4968
/* Attach a SysV shared memory segment into the guest address space.
 * Returns the guest attach address on success or a target errno.
 * Takes the mmap lock around the host shmat() and the page-flag and
 * region-table updates. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce target SHMLBA alignment: round down with SHM_RND,
     * otherwise reject unaligned addresses as the kernel would. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    /* The segment must fit inside the guest address space. */
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: pick a free guest range first. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP lets shmat replace whatever host mapping
             * currently occupies the chosen range. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid and readable (writable unless RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the mapping so do_shmdt() can find its size later.
     * NOTE(review): if all N_SHM_REGIONS slots are busy the attach
     * still succeeds but is left untracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
5037
5038 static inline abi_long do_shmdt(abi_ulong shmaddr)
5039 {
5040 int i;
5041 abi_long rv;
5042
5043 mmap_lock();
5044
5045 for (i = 0; i < N_SHM_REGIONS; ++i) {
5046 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5047 shm_regions[i].in_use = false;
5048 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5049 break;
5050 }
5051 }
5052 rv = get_errno(shmdt(g2h(shmaddr)));
5053
5054 mmap_unlock();
5055
5056 return rv;
5057 }
5058
5059 #ifdef TARGET_NR_ipc
5060 /* ??? This only works with linear mappings. */
5061 /* do_ipc() must return target values and target errnos. */
5062 static abi_long do_ipc(CPUArchState *cpu_env,
5063 unsigned int call, abi_long first,
5064 abi_long second, abi_long third,
5065 abi_long ptr, abi_long fifth)
5066 {
5067 int version;
5068 abi_long ret = 0;
5069
5070 version = call >> 16;
5071 call &= 0xffff;
5072
5073 switch (call) {
5074 case IPCOP_semop:
5075 ret = do_semop(first, ptr, second);
5076 break;
5077
5078 case IPCOP_semget:
5079 ret = get_errno(semget(first, second, third));
5080 break;
5081
5082 case IPCOP_semctl: {
5083 /* The semun argument to semctl is passed by value, so dereference the
5084 * ptr argument. */
5085 abi_ulong atptr;
5086 get_user_ual(atptr, ptr);
5087 ret = do_semctl(first, second, third, atptr);
5088 break;
5089 }
5090
5091 case IPCOP_msgget:
5092 ret = get_errno(msgget(first, second));
5093 break;
5094
5095 case IPCOP_msgsnd:
5096 ret = do_msgsnd(first, ptr, second, third);
5097 break;
5098
5099 case IPCOP_msgctl:
5100 ret = do_msgctl(first, second, ptr);
5101 break;
5102
5103 case IPCOP_msgrcv:
5104 switch (version) {
5105 case 0:
5106 {
5107 struct target_ipc_kludge {
5108 abi_long msgp;
5109 abi_long msgtyp;
5110 } *tmp;
5111
5112 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5113 ret = -TARGET_EFAULT;
5114 break;
5115 }
5116
5117 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5118
5119 unlock_user_struct(tmp, ptr, 0);
5120 break;
5121 }
5122 default:
5123 ret = do_msgrcv(first, ptr, second, fifth, third);
5124 }
5125 break;
5126
5127 case IPCOP_shmat:
5128 switch (version) {
5129 default:
5130 {
5131 abi_ulong raddr;
5132 raddr = do_shmat(cpu_env, first, ptr, second);
5133 if (is_error(raddr))
5134 return get_errno(raddr);
5135 if (put_user_ual(raddr, third))
5136 return -TARGET_EFAULT;
5137 break;
5138 }
5139 case 1:
5140 ret = -TARGET_EINVAL;
5141 break;
5142 }
5143 break;
5144 case IPCOP_shmdt:
5145 ret = do_shmdt(ptr);
5146 break;
5147
5148 case IPCOP_shmget:
5149 /* IPC_* flag values are the same on all linux platforms */
5150 ret = get_errno(shmget(first, second, third));
5151 break;
5152
5153 /* IPC_* and SHM_* command values are the same on all linux platforms */
5154 case IPCOP_shmctl:
5155 ret = do_shmctl(first, second, ptr);
5156 break;
5157 default:
5158 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5159 ret = -TARGET_ENOSYS;
5160 break;
5161 }
5162 return ret;
5163 }
5164 #endif
5165
/* kernel structure types definitions */
/* syscall_types.h is expanded twice below: once to build an enum of
 * STRUCT_xxx ids, and once to emit the thunk type-description arrays
 * used to convert each structure between guest and host layouts. */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: per-structure argtype descriptors for the thunk
 * layer; "special" structs get hand-written conversion code instead. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl handler, for ioctls whose argument
 * cannot be converted by the generic thunk path. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* ioctl number as the guest sees it */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;          /* human-readable name of the ioctl */
    int access;                /* IOC_R/IOC_W/IOC_RW data direction */
    do_ioctl_fn *do_ioctl;     /* custom handler, or NULL for generic path */
    const argtype arg_type[5]; /* thunk description of the argument */
};

/* Data-direction flags for IOCTLEntry.access. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Largest structure the fixed conversion buffer can hold. */
#define MAX_STRUCT_SIZE 4096
5202
5203 #ifdef CONFIG_FIEMAP
5204 /* So fiemap access checks don't overflow on 32 bit systems.
5205 * This is very slightly smaller than the limit imposed by
5206 * the underlying kernel.
5207 */
5208 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5209 / sizeof(struct fiemap_extent))
5210
/* Custom ioctl handler for FS_IOC_FIEMAP.  Returns the host ioctl
 * result or a target errno. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Read and convert the fixed struct fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts that would overflow the size computation below. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
5290 #endif
5291
5292 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5293 int fd, int cmd, abi_long arg)
5294 {
5295 const argtype *arg_type = ie->arg_type;
5296 int target_size;
5297 void *argptr;
5298 int ret;
5299 struct ifconf *host_ifconf;
5300 uint32_t outbufsz;
5301 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5302 int target_ifreq_size;
5303 int nb_ifreq;
5304 int free_buf = 0;
5305 int i;
5306 int target_ifc_len;
5307 abi_long target_ifc_buf;
5308 int host_ifc_len;
5309 char *host_ifc_buf;
5310
5311 assert(arg_type[0] == TYPE_PTR);
5312 assert(ie->access == IOC_RW);
5313
5314 arg_type++;
5315 target_size = thunk_type_size(arg_type, 0);
5316
5317 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5318 if (!argptr)
5319 return -TARGET_EFAULT;
5320 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5321 unlock_user(argptr, arg, 0);
5322
5323 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5324 target_ifc_len = host_ifconf->ifc_len;
5325 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5326
5327 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5328 nb_ifreq = target_ifc_len / target_ifreq_size;
5329 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5330
5331 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5332 if (outbufsz > MAX_STRUCT_SIZE) {
5333 /* We can't fit all the extents into the fixed size buffer.
5334 * Allocate one that is large enough and use it instead.
5335 */
5336 host_ifconf = malloc(outbufsz);
5337 if (!host_ifconf) {
5338 return -TARGET_ENOMEM;
5339 }
5340 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5341 free_buf = 1;
5342 }
5343 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5344
5345 host_ifconf->ifc_len = host_ifc_len;
5346 host_ifconf->ifc_buf = host_ifc_buf;
5347
5348 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5349 if (!is_error(ret)) {
5350 /* convert host ifc_len to target ifc_len */
5351
5352 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5353 target_ifc_len = nb_ifreq * target_ifreq_size;
5354 host_ifconf->ifc_len = target_ifc_len;
5355
5356 /* restore target ifc_buf */
5357
5358 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5359
5360 /* copy struct ifconf to target user */
5361
5362 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5363 if (!argptr)
5364 return -TARGET_EFAULT;
5365 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5366 unlock_user(argptr, arg, target_size);
5367
5368 /* copy ifreq[] to target user */
5369
5370 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5371 for (i = 0; i < nb_ifreq ; i++) {
5372 thunk_convert(argptr + i * target_ifreq_size,
5373 host_ifc_buf + i * sizeof(struct ifreq),
5374 ifreq_arg_type, THUNK_TARGET);
5375 }
5376 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5377 }
5378
5379 if (free_buf) {
5380 free(host_ifconf);
5381 }
5382
5383 return ret;
5384 }
5385
5386 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5387 int cmd, abi_long arg)
5388 {
5389 void *argptr;
5390 struct dm_ioctl *host_dm;
5391 abi_long guest_data;
5392 uint32_t guest_data_size;
5393 int target_size;
5394 const argtype *arg_type = ie->arg_type;
5395 abi_long ret;
5396 void *big_buf = NULL;
5397 char *host_data;
5398
5399 arg_type++;
5400 target_size = thunk_type_size(arg_type, 0);
5401 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402 if (!argptr) {
5403 ret = -TARGET_EFAULT;
5404 goto out;
5405 }
5406 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5407 unlock_user(argptr, arg, 0);
5408
5409 /* buf_temp is too small, so fetch things into a bigger buffer */
5410 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5411 memcpy(big_buf, buf_temp, target_size);
5412 buf_temp = big_buf;
5413 host_dm = big_buf;
5414
5415 guest_data = arg + host_dm->data_start;
5416 if ((guest_data - arg) < 0) {
5417 ret = -TARGET_EINVAL;
5418 goto out;
5419 }
5420 guest_data_size = host_dm->data_size - host_dm->data_start;
5421 host_data = (char*)host_dm + host_dm->data_start;
5422
5423 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5424 if (!argptr) {
5425 ret = -TARGET_EFAULT;
5426 goto out;
5427 }
5428
5429 switch (ie->host_cmd) {
5430 case DM_REMOVE_ALL:
5431 case DM_LIST_DEVICES:
5432 case DM_DEV_CREATE:
5433 case DM_DEV_REMOVE:
5434 case DM_DEV_SUSPEND:
5435 case DM_DEV_STATUS:
5436 case DM_DEV_WAIT:
5437 case DM_TABLE_STATUS:
5438 case DM_TABLE_CLEAR:
5439 case DM_TABLE_DEPS:
5440 case DM_LIST_VERSIONS:
5441 /* no input data */
5442 break;
5443 case DM_DEV_RENAME:
5444 case DM_DEV_SET_GEOMETRY:
5445 /* data contains only strings */
5446 memcpy(host_data, argptr, guest_data_size);
5447 break;
5448 case DM_TARGET_MSG:
5449 memcpy(host_data, argptr, guest_data_size);
5450 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5451 break;
5452 case DM_TABLE_LOAD:
5453 {
5454 void *gspec = argptr;
5455 void *cur_data = host_data;
5456 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5457 int spec_size = thunk_type_size(arg_type, 0);
5458 int i;
5459
5460 for (i = 0; i < host_dm->target_count; i++) {
5461 struct dm_target_spec *spec = cur_data;
5462 uint32_t next;
5463 int slen;
5464
5465 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5466 slen = strlen((char*)gspec + spec_size) + 1;
5467 next = spec->next;
5468 spec->next = sizeof(*spec) + slen;
5469 strcpy((char*)&spec[1], gspec + spec_size);
5470 gspec += next;
5471 cur_data += spec->next;
5472 }
5473 break;
5474 }
5475 default:
5476 ret = -TARGET_EINVAL;
5477 unlock_user(argptr, guest_data, 0);
5478 goto out;
5479 }
5480 unlock_user(argptr, guest_data, 0);
5481
5482 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5483 if (!is_error(ret)) {
5484 guest_data = arg + host_dm->data_start;
5485 guest_data_size = host_dm->data_size - host_dm->data_start;
5486 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5487 switch (ie->host_cmd) {
5488 case DM_REMOVE_ALL:
5489 case DM_DEV_CREATE:
5490 case DM_DEV_REMOVE:
5491 case DM_DEV_RENAME:
5492 case DM_DEV_SUSPEND:
5493 case DM_DEV_STATUS:
5494 case DM_TABLE_LOAD:
5495 case DM_TABLE_CLEAR:
5496 case DM_TARGET_MSG:
5497 case DM_DEV_SET_GEOMETRY:
5498 /* no return data */
5499 break;
5500 case DM_LIST_DEVICES:
5501 {
5502 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5503 uint32_t remaining_data = guest_data_size;
5504 void *cur_data = argptr;
5505 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5506 int nl_size = 12; /* can't use thunk_size due to alignment */
5507
5508 while (1) {
5509 uint32_t next = nl->next;
5510 if (next) {
5511 nl->next = nl_size + (strlen(nl->name) + 1);
5512 }
5513 if (remaining_data < nl->next) {
5514 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5515 break;
5516 }
5517 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5518 strcpy(cur_data + nl_size, nl->name);
5519 cur_data += nl->next;
5520 remaining_data -= nl->next;
5521 if (!next) {
5522 break;
5523 }
5524 nl = (void*)nl + next;
5525 }
5526 break;
5527 }
5528 case DM_DEV_WAIT:
5529 case DM_TABLE_STATUS:
5530 {
5531 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5532 void *cur_data = argptr;
5533 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5534 int spec_size = thunk_type_size(arg_type, 0);
5535 int i;
5536
5537 for (i = 0; i < host_dm->target_count; i++) {
5538 uint32_t next = spec->next;
5539 int slen = strlen((char*)&spec[1]) + 1;
5540 spec->next = (cur_data - argptr) + spec_size + slen;
5541 if (guest_data_size < spec->next) {
5542 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5543 break;
5544 }
5545 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5546 strcpy(cur_data + spec_size, (char*)&spec[1]);
5547 cur_data = argptr + spec->next;
5548 spec = (void*)host_dm + host_dm->data_start + next;
5549 }
5550 break;
5551 }
5552 case DM_TABLE_DEPS:
5553 {
5554 void *hdata = (void*)host_dm + host_dm->data_start;
5555 int count = *(uint32_t*)hdata;
5556 uint64_t *hdev = hdata + 8;
5557 uint64_t *gdev = argptr + 8;
5558 int i;
5559
5560 *(uint32_t*)argptr = tswap32(count);
5561 for (i = 0; i < count; i++) {
5562 *gdev = tswap64(*hdev);
5563 gdev++;
5564 hdev++;
5565 }
5566 break;
5567 }
5568 case DM_LIST_VERSIONS:
5569 {
5570 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5571 uint32_t remaining_data = guest_data_size;
5572 void *cur_data = argptr;
5573 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5574 int vers_size = thunk_type_size(arg_type, 0);
5575
5576 while (1) {
5577 uint32_t next = vers->next;
5578 if (next) {
5579 vers->next = vers_size + (strlen(vers->name) + 1);
5580 }
5581 if (remaining_data < vers->next) {
5582 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5583 break;
5584 }
5585 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5586 strcpy(cur_data + vers_size, vers->name);
5587 cur_data += vers->next;
5588 remaining_data -= vers->next;
5589 if (!next) {
5590 break;
5591 }
5592 vers = (void*)vers + next;
5593 }
5594 break;
5595 }
5596 default:
5597 unlock_user(argptr, guest_data, 0);
5598 ret = -TARGET_EINVAL;
5599 goto out;
5600 }
5601 unlock_user(argptr, guest_data, guest_data_size);
5602
5603 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5604 if (!argptr) {
5605 ret = -TARGET_EFAULT;
5606 goto out;
5607 }
5608 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5609 unlock_user(argptr, arg, target_size);
5610 }
5611 out:
5612 g_free(big_buf);
5613 return ret;
5614 }
5615
5616 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5617 int cmd, abi_long arg)
5618 {
5619 void *argptr;
5620 int target_size;
5621 const argtype *arg_type = ie->arg_type;
5622 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5623 abi_long ret;
5624
5625 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5626 struct blkpg_partition host_part;
5627
5628 /* Read and convert blkpg */
5629 arg_type++;
5630 target_size = thunk_type_size(arg_type, 0);
5631 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5632 if (!argptr) {
5633 ret = -TARGET_EFAULT;
5634 goto out;
5635 }
5636 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5637 unlock_user(argptr, arg, 0);
5638
5639 switch (host_blkpg->op) {
5640 case BLKPG_ADD_PARTITION:
5641 case BLKPG_DEL_PARTITION:
5642 /* payload is struct blkpg_partition */
5643 break;
5644 default:
5645 /* Unknown opcode */
5646 ret = -TARGET_EINVAL;
5647 goto out;
5648 }
5649
5650 /* Read and convert blkpg->data */
5651 arg = (abi_long)(uintptr_t)host_blkpg->data;
5652 target_size = thunk_type_size(part_arg_type, 0);
5653 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5654 if (!argptr) {
5655 ret = -TARGET_EFAULT;
5656 goto out;
5657 }
5658 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5659 unlock_user(argptr, arg, 0);
5660
5661 /* Swizzle the data pointer to our local copy and call! */
5662 host_blkpg->data = &host_part;
5663 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5664
5665 out:
5666 return ret;
5667 }
5668
5669 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5670 int fd, int cmd, abi_long arg)
5671 {
5672 const argtype *arg_type = ie->arg_type;
5673 const StructEntry *se;
5674 const argtype *field_types;
5675 const int *dst_offsets, *src_offsets;
5676 int target_size;
5677 void *argptr;
5678 abi_ulong *target_rt_dev_ptr;
5679 unsigned long *host_rt_dev_ptr;
5680 abi_long ret;
5681 int i;
5682
5683 assert(ie->access == IOC_W);
5684 assert(*arg_type == TYPE_PTR);
5685 arg_type++;
5686 assert(*arg_type == TYPE_STRUCT);
5687 target_size = thunk_type_size(arg_type, 0);
5688 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5689 if (!argptr) {
5690 return -TARGET_EFAULT;
5691 }
5692 arg_type++;
5693 assert(*arg_type == (int)STRUCT_rtentry);
5694 se = struct_entries + *arg_type++;
5695 assert(se->convert[0] == NULL);
5696 /* convert struct here to be able to catch rt_dev string */
5697 field_types = se->field_types;
5698 dst_offsets = se->field_offsets[THUNK_HOST];
5699 src_offsets = se->field_offsets[THUNK_TARGET];
5700 for (i = 0; i < se->nb_fields; i++) {
5701 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5702 assert(*field_types == TYPE_PTRVOID);
5703 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5704 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5705 if (*target_rt_dev_ptr != 0) {
5706 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5707 tswapal(*target_rt_dev_ptr));
5708 if (!*host_rt_dev_ptr) {
5709 unlock_user(argptr, arg, 0);
5710 return -TARGET_EFAULT;
5711 }
5712 } else {
5713 *host_rt_dev_ptr = 0;
5714 }
5715 field_types++;
5716 continue;
5717 }
5718 field_types = thunk_convert(buf_temp + dst_offsets[i],
5719 argptr + src_offsets[i],
5720 field_types, THUNK_HOST);
5721 }
5722 unlock_user(argptr, arg, 0);
5723
5724 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5725 if (*host_rt_dev_ptr != 0) {
5726 unlock_user((void *)*host_rt_dev_ptr,
5727 *target_rt_dev_ptr, 0);
5728 }
5729 return ret;
5730 }
5731
5732 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5733 int fd, int cmd, abi_long arg)
5734 {
5735 int sig = target_to_host_signal(arg);
5736 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5737 }
5738
#ifdef TIOCGPTPEER
/* TIOCGPTPEER: the argument is a set of open(2)-style flags, which
 * must be translated from the guest's bit layout to the host's. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
#endif
5747
/* Table of emulated ioctls, expanded from ioctls.h by the IOCTL*
 * macros below; a zero target_cmd entry terminates the list. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5758
5759 /* ??? Implement proper locking for ioctls. */
5760 /* do_ioctl() Must return target values and target errnos. */
5761 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5762 {
5763 const IOCTLEntry *ie;
5764 const argtype *arg_type;
5765 abi_long ret;
5766 uint8_t buf_temp[MAX_STRUCT_SIZE];
5767 int target_size;
5768 void *argptr;
5769
5770 ie = ioctl_entries;
5771 for(;;) {
5772 if (ie->target_cmd == 0) {
5773 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5774 return -TARGET_ENOSYS;
5775 }
5776 if (ie->target_cmd == cmd)
5777 break;
5778 ie++;
5779 }
5780 arg_type = ie->arg_type;
5781 #if defined(DEBUG)
5782 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5783 #endif
5784 if (ie->do_ioctl) {
5785 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5786 } else if (!ie->host_cmd) {
5787 /* Some architectures define BSD ioctls in their headers
5788 that are not implemented in Linux. */
5789 return -TARGET_ENOSYS;
5790 }
5791
5792 switch(arg_type[0]) {
5793 case TYPE_NULL:
5794 /* no argument */
5795 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5796 break;
5797 case TYPE_PTRVOID:
5798 case TYPE_INT:
5799 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5800 break;
5801 case TYPE_PTR:
5802 arg_type++;
5803 target_size = thunk_type_size(arg_type, 0);
5804 switch(ie->access) {
5805 case IOC_R:
5806 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5807 if (!is_error(ret)) {
5808 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5809 if (!argptr)
5810 return -TARGET_EFAULT;
5811 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5812 unlock_user(argptr, arg, target_size);
5813 }
5814 break;
5815 case IOC_W:
5816 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5817 if (!argptr)
5818 return -TARGET_EFAULT;
5819 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5820 unlock_user(argptr, arg, 0);
5821 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5822 break;
5823 default:
5824 case IOC_RW:
5825 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5826 if (!argptr)
5827 return -TARGET_EFAULT;
5828 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5829 unlock_user(argptr, arg, 0);
5830 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5831 if (!is_error(ret)) {
5832 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5833 if (!argptr)
5834 return -TARGET_EFAULT;
5835 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5836 unlock_user(argptr, arg, target_size);
5837 }
5838 break;
5839 }
5840 break;
5841 default:
5842 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5843 (long)cmd, arg_type[0]);
5844 ret = -TARGET_ENOSYS;
5845 break;
5846 }
5847 return ret;
5848 }
5849
/* termios c_iflag (input mode) bits: target mask/bits -> host mask/bits. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
5867
/* termios c_oflag (output mode) bits; the multi-bit delay fields
 * (NLDLY, CRDLY, ...) get one entry per possible value. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
5895
/* termios c_cflag (control mode) bits; CBAUD and CSIZE are multi-bit
 * fields, so every baud rate / character size gets its own entry. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
5930
/* termios c_lflag (local mode) bits: target mask/bits -> host mask/bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
5949
5950 static void target_to_host_termios (void *dst, const void *src)
5951 {
5952 struct host_termios *host = dst;
5953 const struct target_termios *target = src;
5954
5955 host->c_iflag =
5956 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5957 host->c_oflag =
5958 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5959 host->c_cflag =
5960 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5961 host->c_lflag =
5962 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5963 host->c_line = target->c_line;
5964
5965 memset(host->c_cc, 0, sizeof(host->c_cc));
5966 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5967 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5968 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5969 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5970 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5971 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5972 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5973 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5974 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5975 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5976 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5977 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5978 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5979 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5980 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5981 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5982 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5983 }
5984
5985 static void host_to_target_termios (void *dst, const void *src)
5986 {
5987 struct target_termios *target = dst;
5988 const struct host_termios *host = src;
5989
5990 target->c_iflag =
5991 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5992 target->c_oflag =
5993 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5994 target->c_cflag =
5995 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5996 target->c_lflag =
5997 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5998 target->c_line = host->c_line;
5999
6000 memset(target->c_cc, 0, sizeof(target->c_cc));
6001 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6002 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6003 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6004 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6005 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6006 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6007 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6008 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6009 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6010 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6011 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6012 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6013 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6014 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6015 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6016 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6017 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6018 }
6019
/* Thunk descriptor for struct termios: conversion is done by the two
 * custom converter functions above rather than per-field thunks. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
6025
/* mmap(2) flags: target flag bits -> host flag bits. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host (host bits are zero). */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6048
6049 #if defined(TARGET_I386)
6050
/* NOTE: there is really one LDT for all the threads; it is allocated
 * lazily on the first write_ldt() call and accessed via g2h(). */
static uint8_t *ldt_table;
6053
/* modify_ldt(2) func 0: copy the emulated LDT out to guest memory,
 * truncated to bytecount. Returns the number of bytes copied, or 0
 * when no LDT has been allocated yet. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
6072
/* XXX: add locking support */
/* modify_ldt(2) funcs 1 and 0x11: install one descriptor into the
 * emulated LDT. 'oldmode' selects the legacy semantics (no 'useable'
 * bit, no code segments with contents==3). The descriptor is packed
 * into the two 32-bit halves (entry_1/entry_2) exactly as the kernel
 * does it. Returns 0 or a negative target errno. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* The guest must pass exactly one struct user_desc. */
    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        /* conforming code segments: rejected in oldmode, and must be
         * marked not-present */
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT on first use */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into descriptor format; 0x7000 sets the
     * DPL=3 and S (non-system) bits, present bit comes from
     * seg_not_present inverted. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6164
6165 /* specific and weird i386 syscalls */
6166 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6167 unsigned long bytecount)
6168 {
6169 abi_long ret;
6170
6171 switch (func) {
6172 case 0:
6173 ret = read_ldt(ptr, bytecount);
6174 break;
6175 case 1:
6176 ret = write_ldt(env, ptr, bytecount, 1);
6177 break;
6178 case 0x11:
6179 ret = write_ldt(env, ptr, bytecount, 0);
6180 break;
6181 default:
6182 ret = -TARGET_ENOSYS;
6183 break;
6184 }
6185 return ret;
6186 }
6187
6188 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * If entry_number is -1, pick the first free TLS slot and write the
 * chosen index back to the guest struct. Returns 0 or a negative
 * target errno. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* find a free TLS slot and report it back to the guest */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        /* conforming code segments must be marked not-present */
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into descriptor format; 0x7000 sets the
     * DPL=3 and S (non-system) bits. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6273
/* Implement get_thread_area(2) for i386 guests: read the GDT entry the
 * guest asked about and decode it back into a user_desc-style struct.
 * Inverse of the encoding done by do_set_thread_area() above. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the TLS slots of the GDT may be inspected. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Each GDT descriptor is 8 bytes: view it as two 32-bit words. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Pull the descriptor attribute bits back out of the high word.
     * Note "present" and "readable" are stored inverted vs. the flags. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    /* The long-mode bit has no meaning for a 32-bit ABI. */
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack into the user_desc flags layout used by modify_ldt(). */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    /* Limit: low 16 bits in word 0, bits 16-19 in word 1. */
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    /* Base: bits 0-15 in word 0's top half, 16-23 and 24-31 in word 1. */
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6320 #endif /* TARGET_I386 && TARGET_ABI32 */
6321
6322 #ifndef TARGET_ABI32
6323 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6324 {
6325 abi_long ret = 0;
6326 abi_ulong val;
6327 int idx;
6328
6329 switch(code) {
6330 case TARGET_ARCH_SET_GS:
6331 case TARGET_ARCH_SET_FS:
6332 if (code == TARGET_ARCH_SET_GS)
6333 idx = R_GS;
6334 else
6335 idx = R_FS;
6336 cpu_x86_load_seg(env, idx, 0);
6337 env->segs[idx].base = addr;
6338 break;
6339 case TARGET_ARCH_GET_GS:
6340 case TARGET_ARCH_GET_FS:
6341 if (code == TARGET_ARCH_GET_GS)
6342 idx = R_GS;
6343 else
6344 idx = R_FS;
6345 val = env->segs[idx].base;
6346 if (put_user(val, addr, abi_ulong))
6347 ret = -TARGET_EFAULT;
6348 break;
6349 default:
6350 ret = -TARGET_EINVAL;
6351 break;
6352 }
6353 return ret;
6354 }
6355 #endif
6356
6357 #endif /* defined(TARGET_I386) */
6358
6359 #define NEW_STACK_SIZE 0x40000
6360
6361
/* Held by do_fork() while it sets up a new guest thread; clone_func()
 * takes and immediately drops it so the child cannot start running
 * guest code until the parent's setup (e.g. TLS) is complete. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parent/child handshake state for the CLONE_VM path of do_fork(). */
typedef struct {
    CPUArchState *env;          /* the child's CPU state, built by the parent */
    pthread_mutex_t mutex;      /* protects the startup handshake */
    pthread_cond_t cond;        /* child broadcasts once its tid is published */
    pthread_t thread;
    uint32_t tid;               /* host tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* signal mask the child should restore */
} new_thread_info;
6373
/* Host-thread entry point for guest threads created via clone(CLONE_VM).
 * Registers with RCU/TCG, publishes the host tid to the requested guest
 * locations, signals the waiting parent, then blocks on clone_lock until
 * the parent finishes setup before entering the guest cpu loop. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.
     * NOTE: info points into the parent's stack frame and must not be
     * touched after the broadcast above releases the parent. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6406
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Implements clone(2)/fork(2)/vfork(2) for the guest.  Two paths:
   CLONE_VM creates a host pthread sharing this process's address space;
   otherwise a real host fork() creates a separate process. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Drop flags we deliberately accept-and-ignore. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Thread creation requires the full canonical flag set and
         * none of the combinations we cannot emulate. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the attr/stacksize/detachstate return codes are
         * overwritten without being checked; only pthread_create's result
         * is acted upon. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our signal mask now the child has its own copy. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child run guest code. */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
6546
/* Translate a guest fcntl command to the host's numbering.
 * warning : doesn't handle linux specific flags...
 * Returns -TARGET_EINVAL for commands we do not support; lock commands
 * are always mapped to the 64-bit host variants. */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These have identical numbering everywhere: pass through. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        ret = cmd;
        break;
    /* Always use the 64-bit file locking commands on the host. */
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* 32-bit guests have distinct *LK64 commands; 64-bit guests do not. */
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6642
/* Shared case table for flock l_type conversion.  Each user defines
 * TRANSTBL_CONVERT to map in the desired direction (target<->host)
 * before expanding, so the two converters below cannot drift apart. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
6651
/* Map a guest flock l_type to the host value; unknown types are
 * rejected with -TARGET_EINVAL so bad guest input cannot reach fcntl(). */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6659
/* Map a host flock l_type back to the guest value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6670
6671 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6672 abi_ulong target_flock_addr)
6673 {
6674 struct target_flock *target_fl;
6675 int l_type;
6676
6677 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6678 return -TARGET_EFAULT;
6679 }
6680
6681 __get_user(l_type, &target_fl->l_type);
6682 l_type = target_to_host_flock(l_type);
6683 if (l_type < 0) {
6684 return l_type;
6685 }
6686 fl->l_type = l_type;
6687 __get_user(fl->l_whence, &target_fl->l_whence);
6688 __get_user(fl->l_start, &target_fl->l_start);
6689 __get_user(fl->l_len, &target_fl->l_len);
6690 __get_user(fl->l_pid, &target_fl->l_pid);
6691 unlock_user_struct(target_fl, target_flock_addr, 0);
6692 return 0;
6693 }
6694
/* Copy a host flock64 out to a guest 'struct flock'.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6714
6715 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6716 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6717
6718 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6719 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6720 abi_ulong target_flock_addr)
6721 {
6722 struct target_oabi_flock64 *target_fl;
6723 int l_type;
6724
6725 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6726 return -TARGET_EFAULT;
6727 }
6728
6729 __get_user(l_type, &target_fl->l_type);
6730 l_type = target_to_host_flock(l_type);
6731 if (l_type < 0) {
6732 return l_type;
6733 }
6734 fl->l_type = l_type;
6735 __get_user(fl->l_whence, &target_fl->l_whence);
6736 __get_user(fl->l_start, &target_fl->l_start);
6737 __get_user(fl->l_len, &target_fl->l_len);
6738 __get_user(fl->l_pid, &target_fl->l_pid);
6739 unlock_user_struct(target_fl, target_flock_addr, 0);
6740 return 0;
6741 }
6742
/* Copy a host flock64 out to a guest OABI 'struct flock64'. */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6762 #endif
6763
6764 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6765 abi_ulong target_flock_addr)
6766 {
6767 struct target_flock64 *target_fl;
6768 int l_type;
6769
6770 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6771 return -TARGET_EFAULT;
6772 }
6773
6774 __get_user(l_type, &target_fl->l_type);
6775 l_type = target_to_host_flock(l_type);
6776 if (l_type < 0) {
6777 return l_type;
6778 }
6779 fl->l_type = l_type;
6780 __get_user(fl->l_whence, &target_fl->l_whence);
6781 __get_user(fl->l_start, &target_fl->l_start);
6782 __get_user(fl->l_len, &target_fl->l_len);
6783 __get_user(fl->l_pid, &target_fl->l_pid);
6784 unlock_user_struct(target_fl, target_flock_addr, 0);
6785 return 0;
6786 }
6787
/* Copy a host flock64 out to a guest 'struct flock64'. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6807
/* Implement fcntl(2) for the guest: translate the command and any
 * pointed-to structures, then issue the host fcntl.  'arg' is either a
 * plain value or a guest address, depending on 'cmd'. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* GETLK writes the conflicting lock back into the struct. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Returned O_* flag bits need host->target translation. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Integer-argument commands: pass 'arg' straight through. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown but not rejected above: try the raw guest command. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6915
6916 #ifdef USE_UID16
6917
/* Narrow a 32-bit uid to the 16-bit ABI: anything out of range becomes
 * the overflow id 65534 (the kernel's "nobody"-style sentinel). */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
6925
/* Narrow a 32-bit gid to the 16-bit ABI: out-of-range gids map to 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
6933
/* Widen a 16-bit uid: 0xffff is the "no change" sentinel (-1) and must
 * be sign-extended to a full-width -1; everything else passes through. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
6941
/* Widen a 16-bit gid: 0xffff (-1) stays -1; everything else unchanged. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a uid/gid to guest memory at its 16-bit ABI width. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6955
6956 #else /* !USE_UID16 */
/* 32-bit uid/gid ABI: ids are already full-width, so the narrowing and
 * widening helpers are identity functions and swaps are 32-bit. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a uid/gid to guest memory at its 32-bit ABI width. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6979
6980 #endif /* USE_UID16 */
6981
6982 /* We must do direct syscalls for setting UID/GID, because we want to
6983 * implement the Linux system call semantics of "change only for this thread",
6984 * not the libc/POSIX semantics of "change for all threads in process".
6985 * (See http://ewontfix.com/17/ for more details.)
6986 * We use the 32-bit version of the syscalls if present; if it is not
6987 * then either the host architecture supports 32-bit UIDs natively with
6988 * the standard syscall, or the 16-bit UID is the best we can do.
6989 */
/* Prefer the 32-bit-uid variants of the syscalls where the host has
 * them; otherwise fall back to the plain (possibly 16-bit) numbers. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Direct syscall stubs (see the comment above: must bypass libc). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7015
/* One-time initialisation of the syscall layer: registers all thunk
 * struct descriptions, builds the reverse errno table, patches ioctl
 * sizes that could not be computed at compile time, and (same-arch
 * builds only) sanity-checks the ioctl translation table. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Register every structure listed in syscall_types.h with the thunk
 * marshalling layer. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones placeholder with the real struct size. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7067
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset passed as two 32-bit syscall args;
 * which register holds the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the first argument already holds the whole offset. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
7083
7084 #ifdef TARGET_NR_truncate64
/* Implement truncate64(2): the 64-bit length arrives split across two
 * registers; some ABIs insert a pad register first (regpairs_aligned),
 * in which case the pair is shifted up by one argument slot. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
7096 #endif
7097
7098 #ifdef TARGET_NR_ftruncate64
/* Implement ftruncate64(2); same register-pair alignment handling as
 * target_truncate64() above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
7110 #endif
7111
/* Read a guest timespec into host form.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
7124
/* Write a host timespec out to the guest.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
7137
7138 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7139 abi_ulong target_addr)
7140 {
7141 struct target_itimerspec *target_itspec;
7142
7143 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7144 return -TARGET_EFAULT;
7145 }
7146
7147 host_itspec->it_interval.tv_sec =
7148 tswapal(target_itspec->it_interval.tv_sec);
7149 host_itspec->it_interval.tv_nsec =
7150 tswapal(target_itspec->it_interval.tv_nsec);
7151 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7152 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7153
7154 unlock_user_struct(target_itspec, target_addr, 1);
7155 return 0;
7156 }
7157
7158 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7159 struct itimerspec *host_its)
7160 {
7161 struct target_itimerspec *target_itspec;
7162
7163 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7164 return -TARGET_EFAULT;
7165 }
7166
7167 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7168 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7169
7170 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7171 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7172
7173 unlock_user_struct(target_itspec, target_addr, 0);
7174 return 0;
7175 }
7176
/* Read a guest 'struct timex' (adjtimex/clock_adjtime argument) into
 * host form, field by field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7211
/* Write a host 'struct timex' back out to the guest, field by field.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7246
7247
7248 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7249 abi_ulong target_addr)
7250 {
7251 struct target_sigevent *target_sevp;
7252
7253 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7254 return -TARGET_EFAULT;
7255 }
7256
7257 /* This union is awkward on 64 bit systems because it has a 32 bit
7258 * integer and a pointer in it; we follow the conversion approach
7259 * used for handling sigval types in signal.c so the guest should get
7260 * the correct value back even if we did a 64 bit byteswap and it's
7261 * using the 32 bit integer.
7262 */
7263 host_sevp->sigev_value.sival_ptr =
7264 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7265 host_sevp->sigev_signo =
7266 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7267 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7268 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7269
7270 unlock_user_struct(target_sevp, target_addr, 1);
7271 return 0;
7272 }
7273
7274 #if defined(TARGET_NR_mlockall)
7275 static inline int target_to_host_mlockall_arg(int arg)
7276 {
7277 int result = 0;
7278
7279 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7280 result |= MCL_CURRENT;
7281 }
7282 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7283 result |= MCL_FUTURE;
7284 }
7285 return result;
7286 }
7287 #endif
7288
/* Write a host 'struct stat' out to the guest's 64-bit stat layout.
 * ARM EABI guests use their own target_eabi_stat64 layout; everyone
 * else uses target_stat64 (or target_stat when the target lacks a
 * distinct 64-bit struct).  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7351
7352 /* ??? Using host futex calls even when target atomic operations
7353 are not really atomic probably breaks things. However implementing
7354 futexes locally would make futexes shared between multiple processes
7355 tricky. However they're probably useless because guest atomic
7356 operations won't work either. */
7357 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7358 target_ulong uaddr2, int val3)
7359 {
7360 struct timespec ts, *pts;
7361 int base_op;
7362
7363 /* ??? We assume FUTEX_* constants are the same on both host
7364 and target. */
7365 #ifdef FUTEX_CMD_MASK
7366 base_op = op & FUTEX_CMD_MASK;
7367 #else
7368 base_op = op;
7369 #endif
7370 switch (base_op) {
7371 case FUTEX_WAIT:
7372 case FUTEX_WAIT_BITSET:
7373 if (timeout) {
7374 pts = &ts;
7375 target_to_host_timespec(pts, timeout);
7376 } else {
7377 pts = NULL;
7378 }
7379 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7380 pts, NULL, val3));
7381 case FUTEX_WAKE:
7382 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7383 case FUTEX_FD:
7384 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7385 case FUTEX_REQUEUE:
7386 case FUTEX_CMP_REQUEUE:
7387 case FUTEX_WAKE_OP:
7388 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7389 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7390 But the prototype takes a `struct timespec *'; insert casts
7391 to satisfy the compiler. We do not need to tswap TIMEOUT
7392 since it's not compared to guest memory. */
7393 pts = (struct timespec *)(uintptr_t) timeout;
7394 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7395 g2h(uaddr2),
7396 (base_op == FUTEX_CMP_REQUEUE
7397 ? tswap32(val3)
7398 : val3)));
7399 default:
7400 return -TARGET_ENOSYS;
7401 }
7402 }
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): resolve PATHNAME (relative to DIRFD)
 * into a file handle, writing it to the guest's struct file_handle at
 * HANDLE and the mount id to MOUNT_ID.  Returns the host syscall result
 * or -TARGET_EFAULT on a bad guest pointer. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* The first field of the guest struct is handle_bytes: the size of
     * the variable-length f_handle buffer the caller provided. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Run the host syscall against a host-side buffer of the same size,
     * then copy the result back to the guest below. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    /* Only the two documented header fields need byteswapping; the
     * handle payload itself is opaque. */
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
7456
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): read the guest's struct file_handle at
 * HANDLE and open the file it identifies relative to MOUNT_FD.
 * Returns the new fd (or negative TARGET_* errno). */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes (the first field) gives the size of the opaque
     * f_handle payload that follows the fixed header. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Work on a host copy; fix up the two header fields that are in
     * guest byte order (the payload is opaque and copied verbatim). */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7490
7491 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7492
7493 /* signalfd siginfo conversion */
7494
/*
 * Convert one host signalfd_siginfo record into guest format, in place:
 * translate the signal number and byteswap every field.  Callers pass
 * the same buffer as both TINFO and INFO.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Fix: test the host-order source fields via INFO.  The original
     * read tinfo->ssi_signo/ssi_code, which only happened to work
     * because callers alias tinfo and info. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: take ssi_errno from the const source rather than reading it
     * back out of the destination being written. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7532
7533 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7534 {
7535 int i;
7536
7537 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7538 host_to_target_signalfd_siginfo(buf + i, buf + i);
7539 }
7540
7541 return len;
7542 }
7543
/* fd translator: byteswap signalfd_siginfo records read from a signalfd
 * file descriptor before handing them to the guest. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
7547
/* Emulate signalfd4(2) (and signalfd(2) with flags == 0): convert the
 * guest's sigset and flags, create the host signalfd, and register a
 * translator so data read from it is converted for the guest.
 * Returns the new fd or a negative TARGET_* errno. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd must byteswap the siginfo records. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7575 #endif
7576
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    int sig;

    /* Killed by a signal: the low 7 bits carry the signal number. */
    if (WIFSIGNALED(status)) {
        sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }
    /* Stopped: the signal number occupies bits 8..15. */
    if (WIFSTOPPED(status)) {
        sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    /* Normal exit: nothing to translate. */
    return status;
}
7590
/* Back the guest's /proc/self/cmdline: write each of the guest's argv
 * strings, including its terminating NUL, to FD.  Returns 0 on success,
 * -1 on a short or failed write. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 so the NUL separator between arguments is written too. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
7607
/* Back the guest's /proc/self/maps: parse the host's maps file and
 * re-emit, to FD, only the ranges that correspond to guest memory,
 * with addresses translated from host to guest.  Returns 0 on success,
 * -1 if the host maps file cannot be opened. */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        /* Host maps line: "start-end perms offset dev_maj:dev_min inode
         * [path]" -- path is optional, hence 10 or 11 fields below. */
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        /* Only report ranges whose start maps into the guest address
         * space; clamp the end to the top of guest memory. */
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            /* Label the guest stack like the kernel does. */
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? "         " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
7656
/* Back the guest's /proc/self/stat: emit 44 space-separated fields to
 * FD.  Only pid (field 0), comm (field 1) and startstack (field 27) are
 * filled with real values; everything else is reported as 0.
 * Returns 0 on success, -1 on a short or failed write. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* all remaining fields are faked as 0; the last one ends
             * the line with a newline instead of a space */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
7693
/* Back the guest's /proc/self/auxv: copy the auxiliary vector that was
 * placed on the guest stack at exec time out to FD.  Always returns 0,
 * even if the guest memory could not be locked (best effort). */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the caller reads the file from the start. */
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): len and ptr have been advanced by the loop, so
         * this unlocks with the residual length, not the original --
         * presumably harmless for a read-only lock; confirm. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
7723
/* Return 1 if FILENAME names ENTRY in this process's own /proc
 * directory -- i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY" --
 * and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric pid component: it must be exactly our own pid. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7747
7748 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Return 1 when FILENAME exactly matches the intercepted /proc ENTRY. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7753
7754 static int open_net_route(void *cpu_env, int fd)
7755 {
7756 FILE *fp;
7757 char *line = NULL;
7758 size_t len = 0;
7759 ssize_t read;
7760
7761 fp = fopen("/proc/net/route", "r");
7762 if (fp == NULL) {
7763 return -1;
7764 }
7765
7766 /* read header */
7767
7768 read = getline(&line, &len, fp);
7769 dprintf(fd, "%s", line);
7770
7771 /* read routes */
7772
7773 while ((read = getline(&line, &len, fp)) != -1) {
7774 char iface[16];
7775 uint32_t dest, gw, mask;
7776 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7777 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7778 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7779 &mask, &mtu, &window, &irtt);
7780 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7781 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7782 metric, tswap32(mask), mtu, window, irtt);
7783 }
7784
7785 free(line);
7786 fclose(fp);
7787
7788 return 0;
7789 }
7790 #endif
7791
/* openat(2) back end.  Most paths go straight to the host, but a few
 * /proc entries whose contents must reflect the *guest* (maps, stat,
 * auxv, cmdline, exe, and /proc/net/route on cross-endian setups) are
 * intercepted and synthesized into an unlinked temp file.
 * Returns a host fd or a negative errno. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                    /* entry to intercept */
        int (*fill)(void *cpu_env, int fd);      /* writes fake contents */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must name the guest binary, not QEMU itself. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, and it
         * vanishes automatically when closed. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the caller reads from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7851
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs that do not carry the QEMU magic in the upper bits. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the magic, leaving the index into g_posix_timers. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7872
7873 static abi_long swap_data_eventfd(void *buf, size_t len)
7874 {
7875 uint64_t *counter = buf;
7876 int i;
7877
7878 if (len < sizeof(uint64_t)) {
7879 return -EINVAL;
7880 }
7881
7882 for (i = 0; i < len; i += sizeof(uint64_t)) {
7883 *counter = tswap64(*counter);
7884 counter++;
7885 }
7886
7887 return len;
7888 }
7889
/* fd translator for eventfds: the 8-byte counter is byteswapped the
 * same way in both directions. */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7894
7895 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7896 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7897 defined(__NR_inotify_init1))
/* Byteswap a buffer of inotify events for the guest, in place.  Events
 * are variable length: a fixed header followed by ev->len bytes of
 * name, which needs no conversion.  Returns LEN unchanged. */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        /* Grab the host-order length before swapping it below; it is
         * needed to step to the next event. */
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}
7916
/* fd translator: byteswap inotify events read from an inotify fd. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7920 #endif
7921
/* Read a guest CPU affinity mask (TARGET_SIZE bytes of abi_ulong words
 * at TARGET_ADDR) and repack its bits into the host mask HOST_MASK of
 * HOST_SIZE bytes; the word sizes may differ between guest and host.
 * Returns 0 on success or -TARGET_EFAULT. */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Callers must size the host buffer to hold every guest bit. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    /* Copy bit by bit: `bit` is the absolute bit index, which maps to
     * (word, offset) positions independently on each side. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
7955
/* Inverse of target_to_host_cpu_mask: repack the host CPU affinity mask
 * HOST_MASK into guest abi_ulong words at TARGET_ADDR.
 * Returns 0 on success or -TARGET_EFAULT. */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* The host buffer must cover at least every guest bit. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    /* Gather each guest word's bits from the host mask, then store it
     * in guest byte order via __put_user. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7988
7989 /* do_syscall() should always have a single exit point at the end so
7990 that actions, such as logging of syscall results, can be performed.
7991 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7992 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7993 abi_long arg2, abi_long arg3, abi_long arg4,
7994 abi_long arg5, abi_long arg6, abi_long arg7,
7995 abi_long arg8)
7996 {
7997 CPUState *cpu = ENV_GET_CPU(cpu_env);
7998 abi_long ret;
7999 struct stat st;
8000 struct statfs stfs;
8001 void *p;
8002
8003 #if defined(DEBUG_ERESTARTSYS)
8004 /* Debug-only code for exercising the syscall-restart code paths
8005 * in the per-architecture cpu main loops: restart every syscall
8006 * the guest makes once before letting it through.
8007 */
8008 {
8009 static int flag;
8010
8011 flag = !flag;
8012 if (flag) {
8013 return -TARGET_ERESTARTSYS;
8014 }
8015 }
8016 #endif
8017
8018 #ifdef DEBUG
8019 gemu_log("syscall %d", num);
8020 #endif
8021 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
8022 if(do_strace)
8023 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
8024
8025 switch(num) {
8026 case TARGET_NR_exit:
8027 /* In old applications this may be used to implement _exit(2).
8028 However in threaded applictions it is used for thread termination,
8029 and _exit_group is used for application termination.
8030 Do thread termination if we have more then one thread. */
8031
8032 if (block_signals()) {
8033 ret = -TARGET_ERESTARTSYS;
8034 break;
8035 }
8036
8037 cpu_list_lock();
8038
8039 if (CPU_NEXT(first_cpu)) {
8040 TaskState *ts;
8041
8042 /* Remove the CPU from the list. */
8043 QTAILQ_REMOVE(&cpus, cpu, node);
8044
8045 cpu_list_unlock();
8046
8047 ts = cpu->opaque;
8048 if (ts->child_tidptr) {
8049 put_user_u32(0, ts->child_tidptr);
8050 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8051 NULL, NULL, 0);
8052 }
8053 thread_cpu = NULL;
8054 object_unref(OBJECT(cpu));
8055 g_free(ts);
8056 rcu_unregister_thread();
8057 pthread_exit(NULL);
8058 }
8059
8060 cpu_list_unlock();
8061 preexit_cleanup(cpu_env, arg1);
8062 _exit(arg1);
8063 ret = 0; /* avoid warning */
8064 break;
8065 case TARGET_NR_read:
8066 if (arg3 == 0)
8067 ret = 0;
8068 else {
8069 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8070 goto efault;
8071 ret = get_errno(safe_read(arg1, p, arg3));
8072 if (ret >= 0 &&
8073 fd_trans_host_to_target_data(arg1)) {
8074 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8075 }
8076 unlock_user(p, arg2, ret);
8077 }
8078 break;
8079 case TARGET_NR_write:
8080 if (arg2 == 0 && arg3 == 0) {
8081 return get_errno(safe_write(arg1, 0, 0));
8082 }
8083 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8084 goto efault;
8085 if (fd_trans_target_to_host_data(arg1)) {
8086 void *copy = g_malloc(arg3);
8087 memcpy(copy, p, arg3);
8088 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8089 if (ret >= 0) {
8090 ret = get_errno(safe_write(arg1, copy, ret));
8091 }
8092 g_free(copy);
8093 } else {
8094 ret = get_errno(safe_write(arg1, p, arg3));
8095 }
8096 unlock_user(p, arg2, 0);
8097 break;
8098 #ifdef TARGET_NR_open
8099 case TARGET_NR_open:
8100 if (!(p = lock_user_string(arg1)))
8101 goto efault;
8102 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8103 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8104 arg3));
8105 fd_trans_unregister(ret);
8106 unlock_user(p, arg1, 0);
8107 break;
8108 #endif
8109 case TARGET_NR_openat:
8110 if (!(p = lock_user_string(arg2)))
8111 goto efault;
8112 ret = get_errno(do_openat(cpu_env, arg1, p,
8113 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8114 arg4));
8115 fd_trans_unregister(ret);
8116 unlock_user(p, arg2, 0);
8117 break;
8118 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8119 case TARGET_NR_name_to_handle_at:
8120 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8121 break;
8122 #endif
8123 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8124 case TARGET_NR_open_by_handle_at:
8125 ret = do_open_by_handle_at(arg1, arg2, arg3);
8126 fd_trans_unregister(ret);
8127 break;
8128 #endif
8129 case TARGET_NR_close:
8130 fd_trans_unregister(arg1);
8131 ret = get_errno(close(arg1));
8132 break;
8133 case TARGET_NR_brk:
8134 ret = do_brk(arg1);
8135 break;
8136 #ifdef TARGET_NR_fork
8137 case TARGET_NR_fork:
8138 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8139 break;
8140 #endif
8141 #ifdef TARGET_NR_waitpid
8142 case TARGET_NR_waitpid:
8143 {
8144 int status;
8145 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8146 if (!is_error(ret) && arg2 && ret
8147 && put_user_s32(host_to_target_waitstatus(status), arg2))
8148 goto efault;
8149 }
8150 break;
8151 #endif
8152 #ifdef TARGET_NR_waitid
8153 case TARGET_NR_waitid:
8154 {
8155 siginfo_t info;
8156 info.si_pid = 0;
8157 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8158 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8159 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8160 goto efault;
8161 host_to_target_siginfo(p, &info);
8162 unlock_user(p, arg3, sizeof(target_siginfo_t));
8163 }
8164 }
8165 break;
8166 #endif
8167 #ifdef TARGET_NR_creat /* not on alpha */
8168 case TARGET_NR_creat:
8169 if (!(p = lock_user_string(arg1)))
8170 goto efault;
8171 ret = get_errno(creat(p, arg2));
8172 fd_trans_unregister(ret);
8173 unlock_user(p, arg1, 0);
8174 break;
8175 #endif
8176 #ifdef TARGET_NR_link
8177 case TARGET_NR_link:
8178 {
8179 void * p2;
8180 p = lock_user_string(arg1);
8181 p2 = lock_user_string(arg2);
8182 if (!p || !p2)
8183 ret = -TARGET_EFAULT;
8184 else
8185 ret = get_errno(link(p, p2));
8186 unlock_user(p2, arg2, 0);
8187 unlock_user(p, arg1, 0);
8188 }
8189 break;
8190 #endif
8191 #if defined(TARGET_NR_linkat)
8192 case TARGET_NR_linkat:
8193 {
8194 void * p2 = NULL;
8195 if (!arg2 || !arg4)
8196 goto efault;
8197 p = lock_user_string(arg2);
8198 p2 = lock_user_string(arg4);
8199 if (!p || !p2)
8200 ret = -TARGET_EFAULT;
8201 else
8202 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8203 unlock_user(p, arg2, 0);
8204 unlock_user(p2, arg4, 0);
8205 }
8206 break;
8207 #endif
8208 #ifdef TARGET_NR_unlink
8209 case TARGET_NR_unlink:
8210 if (!(p = lock_user_string(arg1)))
8211 goto efault;
8212 ret = get_errno(unlink(p));
8213 unlock_user(p, arg1, 0);
8214 break;
8215 #endif
8216 #if defined(TARGET_NR_unlinkat)
8217 case TARGET_NR_unlinkat:
8218 if (!(p = lock_user_string(arg2)))
8219 goto efault;
8220 ret = get_errno(unlinkat(arg1, p, arg3));
8221 unlock_user(p, arg2, 0);
8222 break;
8223 #endif
8224 case TARGET_NR_execve:
8225 {
8226 char **argp, **envp;
8227 int argc, envc;
8228 abi_ulong gp;
8229 abi_ulong guest_argp;
8230 abi_ulong guest_envp;
8231 abi_ulong addr;
8232 char **q;
8233 int total_size = 0;
8234
8235 argc = 0;
8236 guest_argp = arg2;
8237 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8238 if (get_user_ual(addr, gp))
8239 goto efault;
8240 if (!addr)
8241 break;
8242 argc++;
8243 }
8244 envc = 0;
8245 guest_envp = arg3;
8246 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8247 if (get_user_ual(addr, gp))
8248 goto efault;
8249 if (!addr)
8250 break;
8251 envc++;
8252 }
8253
8254 argp = g_new0(char *, argc + 1);
8255 envp = g_new0(char *, envc + 1);
8256
8257 for (gp = guest_argp, q = argp; gp;
8258 gp += sizeof(abi_ulong), q++) {
8259 if (get_user_ual(addr, gp))
8260 goto execve_efault;
8261 if (!addr)
8262 break;
8263 if (!(*q = lock_user_string(addr)))
8264 goto execve_efault;
8265 total_size += strlen(*q) + 1;
8266 }
8267 *q = NULL;
8268
8269 for (gp = guest_envp, q = envp; gp;
8270 gp += sizeof(abi_ulong), q++) {
8271 if (get_user_ual(addr, gp))
8272 goto execve_efault;
8273 if (!addr)
8274 break;
8275 if (!(*q = lock_user_string(addr)))
8276 goto execve_efault;
8277 total_size += strlen(*q) + 1;
8278 }
8279 *q = NULL;
8280
8281 if (!(p = lock_user_string(arg1)))
8282 goto execve_efault;
8283 /* Although execve() is not an interruptible syscall it is
8284 * a special case where we must use the safe_syscall wrapper:
8285 * if we allow a signal to happen before we make the host
8286 * syscall then we will 'lose' it, because at the point of
8287 * execve the process leaves QEMU's control. So we use the
8288 * safe syscall wrapper to ensure that we either take the
8289 * signal as a guest signal, or else it does not happen
8290 * before the execve completes and makes it the other
8291 * program's problem.
8292 */
8293 ret = get_errno(safe_execve(p, argp, envp));
8294 unlock_user(p, arg1, 0);
8295
8296 goto execve_end;
8297
8298 execve_efault:
8299 ret = -TARGET_EFAULT;
8300
8301 execve_end:
8302 for (gp = guest_argp, q = argp; *q;
8303 gp += sizeof(abi_ulong), q++) {
8304 if (get_user_ual(addr, gp)
8305 || !addr)
8306 break;
8307 unlock_user(*q, addr, 0);
8308 }
8309 for (gp = guest_envp, q = envp; *q;
8310 gp += sizeof(abi_ulong), q++) {
8311 if (get_user_ual(addr, gp)
8312 || !addr)
8313 break;
8314 unlock_user(*q, addr, 0);
8315 }
8316
8317 g_free(argp);
8318 g_free(envp);
8319 }
8320 break;
8321 case TARGET_NR_chdir:
8322 if (!(p = lock_user_string(arg1)))
8323 goto efault;
8324 ret = get_errno(chdir(p));
8325 unlock_user(p, arg1, 0);
8326 break;
8327 #ifdef TARGET_NR_time
8328 case TARGET_NR_time:
8329 {
8330 time_t host_time;
8331 ret = get_errno(time(&host_time));
8332 if (!is_error(ret)
8333 && arg1
8334 && put_user_sal(host_time, arg1))
8335 goto efault;
8336 }
8337 break;
8338 #endif
8339 #ifdef TARGET_NR_mknod
8340 case TARGET_NR_mknod:
8341 if (!(p = lock_user_string(arg1)))
8342 goto efault;
8343 ret = get_errno(mknod(p, arg2, arg3));
8344 unlock_user(p, arg1, 0);
8345 break;
8346 #endif
8347 #if defined(TARGET_NR_mknodat)
8348 case TARGET_NR_mknodat:
8349 if (!(p = lock_user_string(arg2)))
8350 goto efault;
8351 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8352 unlock_user(p, arg2, 0);
8353 break;
8354 #endif
8355 #ifdef TARGET_NR_chmod
8356 case TARGET_NR_chmod:
8357 if (!(p = lock_user_string(arg1)))
8358 goto efault;
8359 ret = get_errno(chmod(p, arg2));
8360 unlock_user(p, arg1, 0);
8361 break;
8362 #endif
8363 #ifdef TARGET_NR_break
8364 case TARGET_NR_break:
8365 goto unimplemented;
8366 #endif
8367 #ifdef TARGET_NR_oldstat
8368 case TARGET_NR_oldstat:
8369 goto unimplemented;
8370 #endif
8371 case TARGET_NR_lseek:
8372 ret = get_errno(lseek(arg1, arg2, arg3));
8373 break;
8374 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8375 /* Alpha specific */
8376 case TARGET_NR_getxpid:
8377 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8378 ret = get_errno(getpid());
8379 break;
8380 #endif
8381 #ifdef TARGET_NR_getpid
8382 case TARGET_NR_getpid:
8383 ret = get_errno(getpid());
8384 break;
8385 #endif
8386 case TARGET_NR_mount:
8387 {
8388 /* need to look at the data field */
8389 void *p2, *p3;
8390
8391 if (arg1) {
8392 p = lock_user_string(arg1);
8393 if (!p) {
8394 goto efault;
8395 }
8396 } else {
8397 p = NULL;
8398 }
8399
8400 p2 = lock_user_string(arg2);
8401 if (!p2) {
8402 if (arg1) {
8403 unlock_user(p, arg1, 0);
8404 }
8405 goto efault;
8406 }
8407
8408 if (arg3) {
8409 p3 = lock_user_string(arg3);
8410 if (!p3) {
8411 if (arg1) {
8412 unlock_user(p, arg1, 0);
8413 }
8414 unlock_user(p2, arg2, 0);
8415 goto efault;
8416 }
8417 } else {
8418 p3 = NULL;
8419 }
8420
8421 /* FIXME - arg5 should be locked, but it isn't clear how to
8422 * do that since it's not guaranteed to be a NULL-terminated
8423 * string.
8424 */
8425 if (!arg5) {
8426 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8427 } else {
8428 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8429 }
8430 ret = get_errno(ret);
8431
8432 if (arg1) {
8433 unlock_user(p, arg1, 0);
8434 }
8435 unlock_user(p2, arg2, 0);
8436 if (arg3) {
8437 unlock_user(p3, arg3, 0);
8438 }
8439 }
8440 break;
8441 #ifdef TARGET_NR_umount
8442 case TARGET_NR_umount:
8443 if (!(p = lock_user_string(arg1)))
8444 goto efault;
8445 ret = get_errno(umount(p));
8446 unlock_user(p, arg1, 0);
8447 break;
8448 #endif
8449 #ifdef TARGET_NR_stime /* not on alpha */
8450 case TARGET_NR_stime:
8451 {
8452 time_t host_time;
8453 if (get_user_sal(host_time, arg1))
8454 goto efault;
8455 ret = get_errno(stime(&host_time));
8456 }
8457 break;
8458 #endif
8459 case TARGET_NR_ptrace:
8460 goto unimplemented;
8461 #ifdef TARGET_NR_alarm /* not on alpha */
8462 case TARGET_NR_alarm:
8463 ret = alarm(arg1);
8464 break;
8465 #endif
8466 #ifdef TARGET_NR_oldfstat
8467 case TARGET_NR_oldfstat:
8468 goto unimplemented;
8469 #endif
8470 #ifdef TARGET_NR_pause /* not on alpha */
8471 case TARGET_NR_pause:
8472 if (!block_signals()) {
8473 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8474 }
8475 ret = -TARGET_EINTR;
8476 break;
8477 #endif
8478 #ifdef TARGET_NR_utime
8479 case TARGET_NR_utime:
8480 {
8481 struct utimbuf tbuf, *host_tbuf;
8482 struct target_utimbuf *target_tbuf;
8483 if (arg2) {
8484 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8485 goto efault;
8486 tbuf.actime = tswapal(target_tbuf->actime);
8487 tbuf.modtime = tswapal(target_tbuf->modtime);
8488 unlock_user_struct(target_tbuf, arg2, 0);
8489 host_tbuf = &tbuf;
8490 } else {
8491 host_tbuf = NULL;
8492 }
8493 if (!(p = lock_user_string(arg1)))
8494 goto efault;
8495 ret = get_errno(utime(p, host_tbuf));
8496 unlock_user(p, arg1, 0);
8497 }
8498 break;
8499 #endif
8500 #ifdef TARGET_NR_utimes
8501 case TARGET_NR_utimes:
8502 {
8503 struct timeval *tvp, tv[2];
8504 if (arg2) {
8505 if (copy_from_user_timeval(&tv[0], arg2)
8506 || copy_from_user_timeval(&tv[1],
8507 arg2 + sizeof(struct target_timeval)))
8508 goto efault;
8509 tvp = tv;
8510 } else {
8511 tvp = NULL;
8512 }
8513 if (!(p = lock_user_string(arg1)))
8514 goto efault;
8515 ret = get_errno(utimes(p, tvp));
8516 unlock_user(p, arg1, 0);
8517 }
8518 break;
8519 #endif
8520 #if defined(TARGET_NR_futimesat)
8521 case TARGET_NR_futimesat:
8522 {
8523 struct timeval *tvp, tv[2];
8524 if (arg3) {
8525 if (copy_from_user_timeval(&tv[0], arg3)
8526 || copy_from_user_timeval(&tv[1],
8527 arg3 + sizeof(struct target_timeval)))
8528 goto efault;
8529 tvp = tv;
8530 } else {
8531 tvp = NULL;
8532 }
8533 if (!(p = lock_user_string(arg2)))
8534 goto efault;
8535 ret = get_errno(futimesat(arg1, path(p), tvp));
8536 unlock_user(p, arg2, 0);
8537 }
8538 break;
8539 #endif
8540 #ifdef TARGET_NR_stty
8541 case TARGET_NR_stty:
8542 goto unimplemented;
8543 #endif
8544 #ifdef TARGET_NR_gtty
8545 case TARGET_NR_gtty:
8546 goto unimplemented;
8547 #endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        /* access(2): check the caller's permissions for the file named
         * by the guest string at arg1, with mode bits in arg2.
         */
        p = lock_user_string(arg1);
        if (p == NULL) {
            goto efault;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
8556 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8557 case TARGET_NR_faccessat:
8558 if (!(p = lock_user_string(arg2)))
8559 goto efault;
8560 ret = get_errno(faccessat(arg1, p, arg3, 0));
8561 unlock_user(p, arg2, 0);
8562 break;
8563 #endif
8564 #ifdef TARGET_NR_nice /* not on alpha */
8565 case TARGET_NR_nice:
8566 ret = get_errno(nice(arg1));
8567 break;
8568 #endif
8569 #ifdef TARGET_NR_ftime
8570 case TARGET_NR_ftime:
8571 goto unimplemented;
8572 #endif
8573 case TARGET_NR_sync:
8574 sync();
8575 ret = 0;
8576 break;
8577 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8578 case TARGET_NR_syncfs:
8579 ret = get_errno(syncfs(arg1));
8580 break;
8581 #endif
8582 case TARGET_NR_kill:
8583 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8584 break;
8585 #ifdef TARGET_NR_rename
8586 case TARGET_NR_rename:
8587 {
8588 void *p2;
8589 p = lock_user_string(arg1);
8590 p2 = lock_user_string(arg2);
8591 if (!p || !p2)
8592 ret = -TARGET_EFAULT;
8593 else
8594 ret = get_errno(rename(p, p2));
8595 unlock_user(p2, arg2, 0);
8596 unlock_user(p, arg1, 0);
8597 }
8598 break;
8599 #endif
8600 #if defined(TARGET_NR_renameat)
8601 case TARGET_NR_renameat:
8602 {
8603 void *p2;
8604 p = lock_user_string(arg2);
8605 p2 = lock_user_string(arg4);
8606 if (!p || !p2)
8607 ret = -TARGET_EFAULT;
8608 else
8609 ret = get_errno(renameat(arg1, p, arg3, p2));
8610 unlock_user(p2, arg4, 0);
8611 unlock_user(p, arg2, 0);
8612 }
8613 break;
8614 #endif
8615 #if defined(TARGET_NR_renameat2)
8616 case TARGET_NR_renameat2:
8617 {
8618 void *p2;
8619 p = lock_user_string(arg2);
8620 p2 = lock_user_string(arg4);
8621 if (!p || !p2) {
8622 ret = -TARGET_EFAULT;
8623 } else {
8624 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8625 }
8626 unlock_user(p2, arg4, 0);
8627 unlock_user(p, arg2, 0);
8628 }
8629 break;
8630 #endif
8631 #ifdef TARGET_NR_mkdir
8632 case TARGET_NR_mkdir:
8633 if (!(p = lock_user_string(arg1)))
8634 goto efault;
8635 ret = get_errno(mkdir(p, arg2));
8636 unlock_user(p, arg1, 0);
8637 break;
8638 #endif
8639 #if defined(TARGET_NR_mkdirat)
8640 case TARGET_NR_mkdirat:
8641 if (!(p = lock_user_string(arg2)))
8642 goto efault;
8643 ret = get_errno(mkdirat(arg1, p, arg3));
8644 unlock_user(p, arg2, 0);
8645 break;
8646 #endif
8647 #ifdef TARGET_NR_rmdir
8648 case TARGET_NR_rmdir:
8649 if (!(p = lock_user_string(arg1)))
8650 goto efault;
8651 ret = get_errno(rmdir(p));
8652 unlock_user(p, arg1, 0);
8653 break;
8654 #endif
8655 case TARGET_NR_dup:
8656 ret = get_errno(dup(arg1));
8657 if (ret >= 0) {
8658 fd_trans_dup(arg1, ret);
8659 }
8660 break;
8661 #ifdef TARGET_NR_pipe
8662 case TARGET_NR_pipe:
8663 ret = do_pipe(cpu_env, arg1, 0, 0);
8664 break;
8665 #endif
8666 #ifdef TARGET_NR_pipe2
8667 case TARGET_NR_pipe2:
8668 ret = do_pipe(cpu_env, arg1,
8669 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8670 break;
8671 #endif
8672 case TARGET_NR_times:
8673 {
8674 struct target_tms *tmsp;
8675 struct tms tms;
8676 ret = get_errno(times(&tms));
8677 if (arg1) {
8678 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8679 if (!tmsp)
8680 goto efault;
8681 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8682 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8683 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8684 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8685 }
8686 if (!is_error(ret))
8687 ret = host_to_target_clock_t(ret);
8688 }
8689 break;
8690 #ifdef TARGET_NR_prof
8691 case TARGET_NR_prof:
8692 goto unimplemented;
8693 #endif
8694 #ifdef TARGET_NR_signal
8695 case TARGET_NR_signal:
8696 goto unimplemented;
8697 #endif
8698 case TARGET_NR_acct:
8699 if (arg1 == 0) {
8700 ret = get_errno(acct(NULL));
8701 } else {
8702 if (!(p = lock_user_string(arg1)))
8703 goto efault;
8704 ret = get_errno(acct(path(p)));
8705 unlock_user(p, arg1, 0);
8706 }
8707 break;
8708 #ifdef TARGET_NR_umount2
8709 case TARGET_NR_umount2:
8710 if (!(p = lock_user_string(arg1)))
8711 goto efault;
8712 ret = get_errno(umount2(p, arg2));
8713 unlock_user(p, arg1, 0);
8714 break;
8715 #endif
8716 #ifdef TARGET_NR_lock
8717 case TARGET_NR_lock:
8718 goto unimplemented;
8719 #endif
8720 case TARGET_NR_ioctl:
8721 ret = do_ioctl(arg1, arg2, arg3);
8722 break;
8723 #ifdef TARGET_NR_fcntl
8724 case TARGET_NR_fcntl:
8725 ret = do_fcntl(arg1, arg2, arg3);
8726 break;
8727 #endif
8728 #ifdef TARGET_NR_mpx
8729 case TARGET_NR_mpx:
8730 goto unimplemented;
8731 #endif
8732 case TARGET_NR_setpgid:
8733 ret = get_errno(setpgid(arg1, arg2));
8734 break;
8735 #ifdef TARGET_NR_ulimit
8736 case TARGET_NR_ulimit:
8737 goto unimplemented;
8738 #endif
8739 #ifdef TARGET_NR_oldolduname
8740 case TARGET_NR_oldolduname:
8741 goto unimplemented;
8742 #endif
8743 case TARGET_NR_umask:
8744 ret = get_errno(umask(arg1));
8745 break;
8746 case TARGET_NR_chroot:
8747 if (!(p = lock_user_string(arg1)))
8748 goto efault;
8749 ret = get_errno(chroot(p));
8750 unlock_user(p, arg1, 0);
8751 break;
8752 #ifdef TARGET_NR_ustat
8753 case TARGET_NR_ustat:
8754 goto unimplemented;
8755 #endif
8756 #ifdef TARGET_NR_dup2
8757 case TARGET_NR_dup2:
8758 ret = get_errno(dup2(arg1, arg2));
8759 if (ret >= 0) {
8760 fd_trans_dup(arg1, arg2);
8761 }
8762 break;
8763 #endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* The only flag dup3(2) accepts is O_CLOEXEC; reject anything
         * else.  Return the *target's* EINVAL value, not the host's:
         * errno numbering differs between architectures (e.g. Alpha),
         * and every other error path here uses TARGET_* constants.
         */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Propagate any fd translator state from old fd to new. */
            fd_trans_dup(arg1, arg2);
        }
        break;
    }
#endif
8780 #ifdef TARGET_NR_getppid /* not on alpha */
8781 case TARGET_NR_getppid:
8782 ret = get_errno(getppid());
8783 break;
8784 #endif
8785 #ifdef TARGET_NR_getpgrp
8786 case TARGET_NR_getpgrp:
8787 ret = get_errno(getpgrp());
8788 break;
8789 #endif
8790 case TARGET_NR_setsid:
8791 ret = get_errno(setsid());
8792 break;
8793 #ifdef TARGET_NR_sigaction
8794 case TARGET_NR_sigaction:
8795 {
8796 #if defined(TARGET_ALPHA)
8797 struct target_sigaction act, oact, *pact = 0;
8798 struct target_old_sigaction *old_act;
8799 if (arg2) {
8800 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8801 goto efault;
8802 act._sa_handler = old_act->_sa_handler;
8803 target_siginitset(&act.sa_mask, old_act->sa_mask);
8804 act.sa_flags = old_act->sa_flags;
8805 act.sa_restorer = 0;
8806 unlock_user_struct(old_act, arg2, 0);
8807 pact = &act;
8808 }
8809 ret = get_errno(do_sigaction(arg1, pact, &oact));
8810 if (!is_error(ret) && arg3) {
8811 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8812 goto efault;
8813 old_act->_sa_handler = oact._sa_handler;
8814 old_act->sa_mask = oact.sa_mask.sig[0];
8815 old_act->sa_flags = oact.sa_flags;
8816 unlock_user_struct(old_act, arg3, 1);
8817 }
8818 #elif defined(TARGET_MIPS)
8819 struct target_sigaction act, oact, *pact, *old_act;
8820
8821 if (arg2) {
8822 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8823 goto efault;
8824 act._sa_handler = old_act->_sa_handler;
8825 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8826 act.sa_flags = old_act->sa_flags;
8827 unlock_user_struct(old_act, arg2, 0);
8828 pact = &act;
8829 } else {
8830 pact = NULL;
8831 }
8832
8833 ret = get_errno(do_sigaction(arg1, pact, &oact));
8834
8835 if (!is_error(ret) && arg3) {
8836 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8837 goto efault;
8838 old_act->_sa_handler = oact._sa_handler;
8839 old_act->sa_flags = oact.sa_flags;
8840 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8841 old_act->sa_mask.sig[1] = 0;
8842 old_act->sa_mask.sig[2] = 0;
8843 old_act->sa_mask.sig[3] = 0;
8844 unlock_user_struct(old_act, arg3, 1);
8845 }
8846 #else
8847 struct target_old_sigaction *old_act;
8848 struct target_sigaction act, oact, *pact;
8849 if (arg2) {
8850 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8851 goto efault;
8852 act._sa_handler = old_act->_sa_handler;
8853 target_siginitset(&act.sa_mask, old_act->sa_mask);
8854 act.sa_flags = old_act->sa_flags;
8855 act.sa_restorer = old_act->sa_restorer;
8856 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8857 act.ka_restorer = 0;
8858 #endif
8859 unlock_user_struct(old_act, arg2, 0);
8860 pact = &act;
8861 } else {
8862 pact = NULL;
8863 }
8864 ret = get_errno(do_sigaction(arg1, pact, &oact));
8865 if (!is_error(ret) && arg3) {
8866 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8867 goto efault;
8868 old_act->_sa_handler = oact._sa_handler;
8869 old_act->sa_mask = oact.sa_mask.sig[0];
8870 old_act->sa_flags = oact.sa_flags;
8871 old_act->sa_restorer = oact.sa_restorer;
8872 unlock_user_struct(old_act, arg3, 1);
8873 }
8874 #endif
8875 }
8876 break;
8877 #endif
8878 case TARGET_NR_rt_sigaction:
8879 {
8880 #if defined(TARGET_ALPHA)
8881 /* For Alpha and SPARC this is a 5 argument syscall, with
8882 * a 'restorer' parameter which must be copied into the
8883 * sa_restorer field of the sigaction struct.
8884 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8885 * and arg5 is the sigsetsize.
8886 * Alpha also has a separate rt_sigaction struct that it uses
8887 * here; SPARC uses the usual sigaction struct.
8888 */
8889 struct target_rt_sigaction *rt_act;
8890 struct target_sigaction act, oact, *pact = 0;
8891
8892 if (arg4 != sizeof(target_sigset_t)) {
8893 ret = -TARGET_EINVAL;
8894 break;
8895 }
8896 if (arg2) {
8897 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8898 goto efault;
8899 act._sa_handler = rt_act->_sa_handler;
8900 act.sa_mask = rt_act->sa_mask;
8901 act.sa_flags = rt_act->sa_flags;
8902 act.sa_restorer = arg5;
8903 unlock_user_struct(rt_act, arg2, 0);
8904 pact = &act;
8905 }
8906 ret = get_errno(do_sigaction(arg1, pact, &oact));
8907 if (!is_error(ret) && arg3) {
8908 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8909 goto efault;
8910 rt_act->_sa_handler = oact._sa_handler;
8911 rt_act->sa_mask = oact.sa_mask;
8912 rt_act->sa_flags = oact.sa_flags;
8913 unlock_user_struct(rt_act, arg3, 1);
8914 }
8915 #else
8916 #ifdef TARGET_SPARC
8917 target_ulong restorer = arg4;
8918 target_ulong sigsetsize = arg5;
8919 #else
8920 target_ulong sigsetsize = arg4;
8921 #endif
8922 struct target_sigaction *act;
8923 struct target_sigaction *oact;
8924
8925 if (sigsetsize != sizeof(target_sigset_t)) {
8926 ret = -TARGET_EINVAL;
8927 break;
8928 }
8929 if (arg2) {
8930 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8931 goto efault;
8932 }
8933 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8934 act->ka_restorer = restorer;
8935 #endif
8936 } else {
8937 act = NULL;
8938 }
8939 if (arg3) {
8940 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8941 ret = -TARGET_EFAULT;
8942 goto rt_sigaction_fail;
8943 }
8944 } else
8945 oact = NULL;
8946 ret = get_errno(do_sigaction(arg1, act, oact));
8947 rt_sigaction_fail:
8948 if (act)
8949 unlock_user_struct(act, arg2, 0);
8950 if (oact)
8951 unlock_user_struct(oact, arg3, 1);
8952 #endif
8953 }
8954 break;
8955 #ifdef TARGET_NR_sgetmask /* not on alpha */
8956 case TARGET_NR_sgetmask:
8957 {
8958 sigset_t cur_set;
8959 abi_ulong target_set;
8960 ret = do_sigprocmask(0, NULL, &cur_set);
8961 if (!ret) {
8962 host_to_target_old_sigset(&target_set, &cur_set);
8963 ret = target_set;
8964 }
8965 }
8966 break;
8967 #endif
8968 #ifdef TARGET_NR_ssetmask /* not on alpha */
8969 case TARGET_NR_ssetmask:
8970 {
8971 sigset_t set, oset;
8972 abi_ulong target_set = arg1;
8973 target_to_host_old_sigset(&set, &target_set);
8974 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8975 if (!ret) {
8976 host_to_target_old_sigset(&target_set, &oset);
8977 ret = target_set;
8978 }
8979 }
8980 break;
8981 #endif
8982 #ifdef TARGET_NR_sigprocmask
8983 case TARGET_NR_sigprocmask:
8984 {
8985 #if defined(TARGET_ALPHA)
8986 sigset_t set, oldset;
8987 abi_ulong mask;
8988 int how;
8989
8990 switch (arg1) {
8991 case TARGET_SIG_BLOCK:
8992 how = SIG_BLOCK;
8993 break;
8994 case TARGET_SIG_UNBLOCK:
8995 how = SIG_UNBLOCK;
8996 break;
8997 case TARGET_SIG_SETMASK:
8998 how = SIG_SETMASK;
8999 break;
9000 default:
9001 ret = -TARGET_EINVAL;
9002 goto fail;
9003 }
9004 mask = arg2;
9005 target_to_host_old_sigset(&set, &mask);
9006
9007 ret = do_sigprocmask(how, &set, &oldset);
9008 if (!is_error(ret)) {
9009 host_to_target_old_sigset(&mask, &oldset);
9010 ret = mask;
9011 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9012 }
9013 #else
9014 sigset_t set, oldset, *set_ptr;
9015 int how;
9016
9017 if (arg2) {
9018 switch (arg1) {
9019 case TARGET_SIG_BLOCK:
9020 how = SIG_BLOCK;
9021 break;
9022 case TARGET_SIG_UNBLOCK:
9023 how = SIG_UNBLOCK;
9024 break;
9025 case TARGET_SIG_SETMASK:
9026 how = SIG_SETMASK;
9027 break;
9028 default:
9029 ret = -TARGET_EINVAL;
9030 goto fail;
9031 }
9032 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9033 goto efault;
9034 target_to_host_old_sigset(&set, p);
9035 unlock_user(p, arg2, 0);
9036 set_ptr = &set;
9037 } else {
9038 how = 0;
9039 set_ptr = NULL;
9040 }
9041 ret = do_sigprocmask(how, set_ptr, &oldset);
9042 if (!is_error(ret) && arg3) {
9043 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9044 goto efault;
9045 host_to_target_old_sigset(p, &oldset);
9046 unlock_user(p, arg3, sizeof(target_sigset_t));
9047 }
9048 #endif
9049 }
9050 break;
9051 #endif
9052 case TARGET_NR_rt_sigprocmask:
9053 {
9054 int how = arg1;
9055 sigset_t set, oldset, *set_ptr;
9056
9057 if (arg4 != sizeof(target_sigset_t)) {
9058 ret = -TARGET_EINVAL;
9059 break;
9060 }
9061
9062 if (arg2) {
9063 switch(how) {
9064 case TARGET_SIG_BLOCK:
9065 how = SIG_BLOCK;
9066 break;
9067 case TARGET_SIG_UNBLOCK:
9068 how = SIG_UNBLOCK;
9069 break;
9070 case TARGET_SIG_SETMASK:
9071 how = SIG_SETMASK;
9072 break;
9073 default:
9074 ret = -TARGET_EINVAL;
9075 goto fail;
9076 }
9077 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9078 goto efault;
9079 target_to_host_sigset(&set, p);
9080 unlock_user(p, arg2, 0);
9081 set_ptr = &set;
9082 } else {
9083 how = 0;
9084 set_ptr = NULL;
9085 }
9086 ret = do_sigprocmask(how, set_ptr, &oldset);
9087 if (!is_error(ret) && arg3) {
9088 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9089 goto efault;
9090 host_to_target_sigset(p, &oldset);
9091 unlock_user(p, arg3, sizeof(target_sigset_t));
9092 }
9093 }
9094 break;
9095 #ifdef TARGET_NR_sigpending
9096 case TARGET_NR_sigpending:
9097 {
9098 sigset_t set;
9099 ret = get_errno(sigpending(&set));
9100 if (!is_error(ret)) {
9101 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9102 goto efault;
9103 host_to_target_old_sigset(p, &set);
9104 unlock_user(p, arg1, sizeof(target_sigset_t));
9105 }
9106 }
9107 break;
9108 #endif
9109 case TARGET_NR_rt_sigpending:
9110 {
9111 sigset_t set;
9112
9113 /* Yes, this check is >, not != like most. We follow the kernel's
9114 * logic and it does it like this because it implements
9115 * NR_sigpending through the same code path, and in that case
9116 * the old_sigset_t is smaller in size.
9117 */
9118 if (arg2 > sizeof(target_sigset_t)) {
9119 ret = -TARGET_EINVAL;
9120 break;
9121 }
9122
9123 ret = get_errno(sigpending(&set));
9124 if (!is_error(ret)) {
9125 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9126 goto efault;
9127 host_to_target_sigset(p, &set);
9128 unlock_user(p, arg1, sizeof(target_sigset_t));
9129 }
9130 }
9131 break;
9132 #ifdef TARGET_NR_sigsuspend
9133 case TARGET_NR_sigsuspend:
9134 {
9135 TaskState *ts = cpu->opaque;
9136 #if defined(TARGET_ALPHA)
9137 abi_ulong mask = arg1;
9138 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9139 #else
9140 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9141 goto efault;
9142 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9143 unlock_user(p, arg1, 0);
9144 #endif
9145 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9146 SIGSET_T_SIZE));
9147 if (ret != -TARGET_ERESTARTSYS) {
9148 ts->in_sigsuspend = 1;
9149 }
9150 }
9151 break;
9152 #endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            /* Like the kernel, only accept the exact sigset size. */
            if (arg2 != sizeof(target_sigset_t)) {
                ret = -TARGET_EINVAL;
                break;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            /* Stash the converted mask in the TaskState; presumably the
             * signal-frame code reads sigsuspend_mask when delivering a
             * signal during the suspend -- confirm against signal.c.
             */
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* Only flag the suspend as "in progress" if we are not about
             * to restart the syscall from scratch.
             */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        break;
9172 case TARGET_NR_rt_sigtimedwait:
9173 {
9174 sigset_t set;
9175 struct timespec uts, *puts;
9176 siginfo_t uinfo;
9177
9178 if (arg4 != sizeof(target_sigset_t)) {
9179 ret = -TARGET_EINVAL;
9180 break;
9181 }
9182
9183 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9184 goto efault;
9185 target_to_host_sigset(&set, p);
9186 unlock_user(p, arg1, 0);
9187 if (arg3) {
9188 puts = &uts;
9189 target_to_host_timespec(puts, arg3);
9190 } else {
9191 puts = NULL;
9192 }
9193 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9194 SIGSET_T_SIZE));
9195 if (!is_error(ret)) {
9196 if (arg2) {
9197 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9198 0);
9199 if (!p) {
9200 goto efault;
9201 }
9202 host_to_target_siginfo(p, &uinfo);
9203 unlock_user(p, arg2, sizeof(target_siginfo_t));
9204 }
9205 ret = host_to_target_signal(ret);
9206 }
9207 }
9208 break;
9209 case TARGET_NR_rt_sigqueueinfo:
9210 {
9211 siginfo_t uinfo;
9212
9213 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9214 if (!p) {
9215 goto efault;
9216 }
9217 target_to_host_siginfo(&uinfo, p);
9218 unlock_user(p, arg3, 0);
9219 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9220 }
9221 break;
9222 case TARGET_NR_rt_tgsigqueueinfo:
9223 {
9224 siginfo_t uinfo;
9225
9226 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9227 if (!p) {
9228 goto efault;
9229 }
9230 target_to_host_siginfo(&uinfo, p);
9231 unlock_user(p, arg4, 0);
9232 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9233 }
9234 break;
9235 #ifdef TARGET_NR_sigreturn
9236 case TARGET_NR_sigreturn:
9237 if (block_signals()) {
9238 ret = -TARGET_ERESTARTSYS;
9239 } else {
9240 ret = do_sigreturn(cpu_env);
9241 }
9242 break;
9243 #endif
9244 case TARGET_NR_rt_sigreturn:
9245 if (block_signals()) {
9246 ret = -TARGET_ERESTARTSYS;
9247 } else {
9248 ret = do_rt_sigreturn(cpu_env);
9249 }
9250 break;
9251 case TARGET_NR_sethostname:
9252 if (!(p = lock_user_string(arg1)))
9253 goto efault;
9254 ret = get_errno(sethostname(p, arg2));
9255 unlock_user(p, arg1, 0);
9256 break;
9257 case TARGET_NR_setrlimit:
9258 {
9259 int resource = target_to_host_resource(arg1);
9260 struct target_rlimit *target_rlim;
9261 struct rlimit rlim;
9262 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9263 goto efault;
9264 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9265 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9266 unlock_user_struct(target_rlim, arg2, 0);
9267 ret = get_errno(setrlimit(resource, &rlim));
9268 }
9269 break;
9270 case TARGET_NR_getrlimit:
9271 {
9272 int resource = target_to_host_resource(arg1);
9273 struct target_rlimit *target_rlim;
9274 struct rlimit rlim;
9275
9276 ret = get_errno(getrlimit(resource, &rlim));
9277 if (!is_error(ret)) {
9278 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9279 goto efault;
9280 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9281 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9282 unlock_user_struct(target_rlim, arg2, 1);
9283 }
9284 }
9285 break;
9286 case TARGET_NR_getrusage:
9287 {
9288 struct rusage rusage;
9289 ret = get_errno(getrusage(arg1, &rusage));
9290 if (!is_error(ret)) {
9291 ret = host_to_target_rusage(arg2, &rusage);
9292 }
9293 }
9294 break;
9295 case TARGET_NR_gettimeofday:
9296 {
9297 struct timeval tv;
9298 ret = get_errno(gettimeofday(&tv, NULL));
9299 if (!is_error(ret)) {
9300 if (copy_to_user_timeval(arg1, &tv))
9301 goto efault;
9302 }
9303 }
9304 break;
9305 case TARGET_NR_settimeofday:
9306 {
9307 struct timeval tv, *ptv = NULL;
9308 struct timezone tz, *ptz = NULL;
9309
9310 if (arg1) {
9311 if (copy_from_user_timeval(&tv, arg1)) {
9312 goto efault;
9313 }
9314 ptv = &tv;
9315 }
9316
9317 if (arg2) {
9318 if (copy_from_user_timezone(&tz, arg2)) {
9319 goto efault;
9320 }
9321 ptz = &tz;
9322 }
9323
9324 ret = get_errno(settimeofday(ptv, ptz));
9325 }
9326 break;
9327 #if defined(TARGET_NR_select)
9328 case TARGET_NR_select:
9329 #if defined(TARGET_WANT_NI_OLD_SELECT)
9330 /* some architectures used to have old_select here
9331 * but now ENOSYS it.
9332 */
9333 ret = -TARGET_ENOSYS;
9334 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9335 ret = do_old_select(arg1);
9336 #else
9337 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9338 #endif
9339 break;
9340 #endif
9341 #ifdef TARGET_NR_pselect6
9342 case TARGET_NR_pselect6:
9343 {
9344 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9345 fd_set rfds, wfds, efds;
9346 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9347 struct timespec ts, *ts_ptr;
9348
9349 /*
9350 * The 6th arg is actually two args smashed together,
9351 * so we cannot use the C library.
9352 */
9353 sigset_t set;
9354 struct {
9355 sigset_t *set;
9356 size_t size;
9357 } sig, *sig_ptr;
9358
9359 abi_ulong arg_sigset, arg_sigsize, *arg7;
9360 target_sigset_t *target_sigset;
9361
9362 n = arg1;
9363 rfd_addr = arg2;
9364 wfd_addr = arg3;
9365 efd_addr = arg4;
9366 ts_addr = arg5;
9367
9368 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9369 if (ret) {
9370 goto fail;
9371 }
9372 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9373 if (ret) {
9374 goto fail;
9375 }
9376 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9377 if (ret) {
9378 goto fail;
9379 }
9380
9381 /*
9382 * This takes a timespec, and not a timeval, so we cannot
9383 * use the do_select() helper ...
9384 */
9385 if (ts_addr) {
9386 if (target_to_host_timespec(&ts, ts_addr)) {
9387 goto efault;
9388 }
9389 ts_ptr = &ts;
9390 } else {
9391 ts_ptr = NULL;
9392 }
9393
9394 /* Extract the two packed args for the sigset */
9395 if (arg6) {
9396 sig_ptr = &sig;
9397 sig.size = SIGSET_T_SIZE;
9398
9399 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9400 if (!arg7) {
9401 goto efault;
9402 }
9403 arg_sigset = tswapal(arg7[0]);
9404 arg_sigsize = tswapal(arg7[1]);
9405 unlock_user(arg7, arg6, 0);
9406
9407 if (arg_sigset) {
9408 sig.set = &set;
9409 if (arg_sigsize != sizeof(*target_sigset)) {
9410 /* Like the kernel, we enforce correct size sigsets */
9411 ret = -TARGET_EINVAL;
9412 goto fail;
9413 }
9414 target_sigset = lock_user(VERIFY_READ, arg_sigset,
9415 sizeof(*target_sigset), 1);
9416 if (!target_sigset) {
9417 goto efault;
9418 }
9419 target_to_host_sigset(&set, target_sigset);
9420 unlock_user(target_sigset, arg_sigset, 0);
9421 } else {
9422 sig.set = NULL;
9423 }
9424 } else {
9425 sig_ptr = NULL;
9426 }
9427
9428 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9429 ts_ptr, sig_ptr));
9430
9431 if (!is_error(ret)) {
9432 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9433 goto efault;
9434 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9435 goto efault;
9436 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9437 goto efault;
9438
9439 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9440 goto efault;
9441 }
9442 }
9443 break;
9444 #endif
9445 #ifdef TARGET_NR_symlink
9446 case TARGET_NR_symlink:
9447 {
9448 void *p2;
9449 p = lock_user_string(arg1);
9450 p2 = lock_user_string(arg2);
9451 if (!p || !p2)
9452 ret = -TARGET_EFAULT;
9453 else
9454 ret = get_errno(symlink(p, p2));
9455 unlock_user(p2, arg2, 0);
9456 unlock_user(p, arg1, 0);
9457 }
9458 break;
9459 #endif
9460 #if defined(TARGET_NR_symlinkat)
9461 case TARGET_NR_symlinkat:
9462 {
9463 void *p2;
9464 p = lock_user_string(arg1);
9465 p2 = lock_user_string(arg3);
9466 if (!p || !p2)
9467 ret = -TARGET_EFAULT;
9468 else
9469 ret = get_errno(symlinkat(p, arg2, p2));
9470 unlock_user(p2, arg3, 0);
9471 unlock_user(p, arg1, 0);
9472 }
9473 break;
9474 #endif
9475 #ifdef TARGET_NR_oldlstat
9476 case TARGET_NR_oldlstat:
9477 goto unimplemented;
9478 #endif
9479 #ifdef TARGET_NR_readlink
9480 case TARGET_NR_readlink:
9481 {
9482 void *p2;
9483 p = lock_user_string(arg1);
9484 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9485 if (!p || !p2) {
9486 ret = -TARGET_EFAULT;
9487 } else if (!arg3) {
9488 /* Short circuit this for the magic exe check. */
9489 ret = -TARGET_EINVAL;
9490 } else if (is_proc_myself((const char *)p, "exe")) {
9491 char real[PATH_MAX], *temp;
9492 temp = realpath(exec_path, real);
9493 /* Return value is # of bytes that we wrote to the buffer. */
9494 if (temp == NULL) {
9495 ret = get_errno(-1);
9496 } else {
9497 /* Don't worry about sign mismatch as earlier mapping
9498 * logic would have thrown a bad address error. */
9499 ret = MIN(strlen(real), arg3);
9500 /* We cannot NUL terminate the string. */
9501 memcpy(p2, real, ret);
9502 }
9503 } else {
9504 ret = get_errno(readlink(path(p), p2, arg3));
9505 }
9506 unlock_user(p2, arg2, ret);
9507 unlock_user(p, arg1, 0);
9508 }
9509 break;
9510 #endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
    {
        void *p2;
        p = lock_user_string(arg2);
        p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
        if (!p || !p2) {
            ret = -TARGET_EFAULT;
        } else if (!arg4) {
            /* Short circuit this for the magic exe check (same as the
             * readlink handler above).
             */
            ret = -TARGET_EINVAL;
        } else if (is_proc_myself((const char *)p, "exe")) {
            char real[PATH_MAX], *temp;
            temp = realpath(exec_path, real);
            /* Return value is # of bytes that we wrote to the buffer. */
            if (temp == NULL) {
                ret = get_errno(-1);
            } else {
                /* Match readlink(2) semantics, as the readlink handler
                 * does: cap at the buffer size, do NOT NUL-terminate,
                 * and return the number of bytes actually copied
                 * (snprintf would both terminate and over-report on
                 * truncation).
                 */
                ret = MIN(strlen(real), arg4);
                memcpy(p2, real, ret);
            }
        } else {
            ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
        }
        unlock_user(p2, arg3, ret);
        unlock_user(p, arg2, 0);
    }
    break;
#endif
9532 #ifdef TARGET_NR_uselib
9533 case TARGET_NR_uselib:
9534 goto unimplemented;
9535 #endif
9536 #ifdef TARGET_NR_swapon
9537 case TARGET_NR_swapon:
9538 if (!(p = lock_user_string(arg1)))
9539 goto efault;
9540 ret = get_errno(swapon(p, arg2));
9541 unlock_user(p, arg1, 0);
9542 break;
9543 #endif
9544 case TARGET_NR_reboot:
9545 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9546 /* arg4 must be ignored in all other cases */
9547 p = lock_user_string(arg4);
9548 if (!p) {
9549 goto efault;
9550 }
9551 ret = get_errno(reboot(arg1, arg2, arg3, p));
9552 unlock_user(p, arg4, 0);
9553 } else {
9554 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9555 }
9556 break;
9557 #ifdef TARGET_NR_readdir
9558 case TARGET_NR_readdir:
9559 goto unimplemented;
9560 #endif
9561 #ifdef TARGET_NR_mmap
9562 case TARGET_NR_mmap:
9563 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9564 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9565 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9566 || defined(TARGET_S390X)
9567 {
9568 abi_ulong *v;
9569 abi_ulong v1, v2, v3, v4, v5, v6;
9570 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9571 goto efault;
9572 v1 = tswapal(v[0]);
9573 v2 = tswapal(v[1]);
9574 v3 = tswapal(v[2]);
9575 v4 = tswapal(v[3]);
9576 v5 = tswapal(v[4]);
9577 v6 = tswapal(v[5]);
9578 unlock_user(v, arg1, 0);
9579 ret = get_errno(target_mmap(v1, v2, v3,
9580 target_to_host_bitmask(v4, mmap_flags_tbl),
9581 v5, v6));
9582 }
9583 #else
9584 ret = get_errno(target_mmap(arg1, arg2, arg3,
9585 target_to_host_bitmask(arg4, mmap_flags_tbl),
9586 arg5,
9587 arg6));
9588 #endif
9589 break;
9590 #endif
9591 #ifdef TARGET_NR_mmap2
9592 case TARGET_NR_mmap2:
9593 #ifndef MMAP_SHIFT
9594 #define MMAP_SHIFT 12
9595 #endif
9596 ret = get_errno(target_mmap(arg1, arg2, arg3,
9597 target_to_host_bitmask(arg4, mmap_flags_tbl),
9598 arg5,
9599 arg6 << MMAP_SHIFT));
9600 break;
9601 #endif
9602 case TARGET_NR_munmap:
9603 ret = get_errno(target_munmap(arg1, arg2));
9604 break;
9605 case TARGET_NR_mprotect:
9606 {
9607 TaskState *ts = cpu->opaque;
9608 /* Special hack to detect libc making the stack executable. */
9609 if ((arg3 & PROT_GROWSDOWN)
9610 && arg1 >= ts->info->stack_limit
9611 && arg1 <= ts->info->start_stack) {
9612 arg3 &= ~PROT_GROWSDOWN;
9613 arg2 = arg2 + arg1 - ts->info->stack_limit;
9614 arg1 = ts->info->stack_limit;
9615 }
9616 }
9617 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9618 break;
9619 #ifdef TARGET_NR_mremap
9620 case TARGET_NR_mremap:
9621 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9622 break;
9623 #endif
9624 /* ??? msync/mlock/munlock are broken for softmmu. */
9625 #ifdef TARGET_NR_msync
9626 case TARGET_NR_msync:
9627 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9628 break;
9629 #endif
9630 #ifdef TARGET_NR_mlock
9631 case TARGET_NR_mlock:
9632 ret = get_errno(mlock(g2h(arg1), arg2));
9633 break;
9634 #endif
9635 #ifdef TARGET_NR_munlock
9636 case TARGET_NR_munlock:
9637 ret = get_errno(munlock(g2h(arg1), arg2));
9638 break;
9639 #endif
9640 #ifdef TARGET_NR_mlockall
9641 case TARGET_NR_mlockall:
9642 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9643 break;
9644 #endif
9645 #ifdef TARGET_NR_munlockall
9646 case TARGET_NR_munlockall:
9647 ret = get_errno(munlockall());
9648 break;
9649 #endif
    case TARGET_NR_truncate:
        /* Path-taking syscalls copy the guest string in, call the host,
         * then unlock without write-back. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
        goto unimplemented;
#endif
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* TARGET_NR_fstatfs jumps here to share the host->target statfs
     * conversion; 'ret' and 'stfs' are already set by the caller path. */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        break;
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    /* Shared with TARGET_NR_fstatfs64; note the target struct pointer is
     * arg3 here (statfs64 has a size argument in arg2). */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        break;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point (old i386-style ABI); dispatches
         * to the same do_* helpers as the direct syscalls below. */
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept == accept4 with flags 0 */
        ret = do_accept4(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        ret = do_accept4(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv/send are recvfrom/sendto with a null peer address. */
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
        break;
    case TARGET_NR_recvmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            goto efault;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* ret (bytes produced) is used as the write-back length. */
        unlock_user(p, arg1, ret);
        break;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
        break;
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(int type, char *bufp, int len) -- see syslog(2):
             * arg1 is the action, arg2 the user buffer, arg3 its length.
             * The length therefore comes from arg3, not arg2 (which is the
             * buffer pointer and is what lock_user()/sys_syslog() below use
             * together with arg3).
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                {
                    /* No buffer needed; the kernel only looks at the length
                     * (e.g. as the console level for CONSOLE_LEVEL). */
                    ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
                }
                break;
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    ret = -TARGET_EINVAL;
                    if (len < 0) {
                        goto fail;
                    }
                    ret = 0;
                    if (len == 0) {
                        break;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        ret = -TARGET_EFAULT;
                        goto fail;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                break;
            default:
                /* Target errno, consistent with the other paths here
                 * (previously returned host -EINVAL by mistake). */
                ret = -TARGET_EINVAL;
                break;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2 == 0 means "disarm" via a NULL new-value pointer. */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Copy the old value back only on success and if requested. */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    goto efault;
            }
        }
        break;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        /* stat and lstat jump here to share the stat conversion code;
         * 'ret' and 'st' are already populated on those paths. */
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    goto efault;
                /* Zero first so padding/unset fields are deterministic. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        break;
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
        goto unimplemented;
#endif
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
        goto unimplemented;
#endif
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
        break;
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
        goto unimplemented;
#endif
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter this dispatcher with shifted args. */
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                         arg6, arg7, arg8, 0);
        break;
#endif
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back when a child was reaped
                 * (ret == 0 means WNOHANG with no state change). */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        goto efault;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        break;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    goto efault;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        break;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point (older ABIs); the direct
         * sem*/msg*/shm* cases below share its helpers. */
        ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = do_semop(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(cpu_env, arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        break;
#ifdef __NR_exit_group
    /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style guest cleanup before terminating the group. */
        preexit_cleanup(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* struct timex is both an input and an output; convert both
             * directions around the host call. */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                goto efault;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    goto efault;
                }
            }
        }
        break;
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                goto efault;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    goto efault;
                }
            }
        }
        break;
#endif
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* No host llseek: emulate with a plain 64-bit lseek from the
             * guest's split high (arg2) / low (arg3) offset halves. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* The resulting offset is stored through the arg4 pointer. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        /* 32-bit target on 64-bit host: read host dirents into a bounce
         * buffer and repack them entry by entry into the (smaller)
         * target_dirent layout in guest memory. */
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                ret = -TARGET_ENOMEM;
                goto fail;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    goto efault;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    /* Target records are never larger than host ones, so
                     * the repacked data always fits in count bytes. */
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        /* Same-width target and host: byteswap the records in place in
         * guest memory. */
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                goto efault;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        break;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /* dirent64 has the same layout on target and host, so only an
         * in-place byteswap of each record is needed. */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        break;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        /* Shared poll/ppoll body: convert the pollfd array, then branch on
         * 'num' for the timeout/sigmask handling that differs. */
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    goto efault;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
                {
                    struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                    target_sigset_t *target_set;
                    sigset_t _set, *set = &_set;

                    if (arg3) {
                        if (target_to_host_timespec(timeout_ts, arg3)) {
                            unlock_user(target_pfd, arg1, 0);
                            goto efault;
                        }
                    } else {
                        timeout_ts = NULL;
                    }

                    if (arg4) {
                        if (arg5 != sizeof(target_sigset_t)) {
                            unlock_user(target_pfd, arg1, 0);
                            ret = -TARGET_EINVAL;
                            break;
                        }

                        target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                        if (!target_set) {
                            unlock_user(target_pfd, arg1, 0);
                            goto efault;
                        }
                        target_to_host_sigset(set, target_set);
                    } else {
                        set = NULL;
                    }

                    ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                               set, SIGSET_T_SIZE));

                    /* ppoll updates the remaining timeout on success. */
                    if (!is_error(ret) && arg3) {
                        host_to_target_timespec(arg3, timeout_ts);
                    }
                    if (arg4) {
                        unlock_user(target_set, arg4, 0);
                    }
                    break;
                }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
                {
                    struct timespec ts, *pts;

                    if (arg3 >= 0) {
                        /* Convert ms to secs, ns */
                        ts.tv_sec = arg3 / 1000;
                        ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                        pts = &ts;
                    } else {
                        /* -ve poll() timeout means "infinite" */
                        pts = NULL;
                    }
                    ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                    break;
                }
# endif
            default:
                g_assert_not_reached();
            }

            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(safe_flock(arg1, arg2));
        break;
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports its failure reason through errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* The 64-bit offset arrives as two ABI words (arg4/arg5);
                 * split them into the host's low/high convention. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
        break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            /* Round the guest-supplied size up to a host-ulong multiple. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs.  If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        ret = -TARGET_EINVAL;
                        break;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    goto efault;
                }
            }
        }
        break;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                break;
            }

            ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
        break;
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* NULL out the pointers the guest did not ask for. */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                goto fail;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                goto efault;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                goto efault;
            }
        }
        break;
10731 case TARGET_NR_sched_setparam:
10732 {
10733 struct sched_param *target_schp;
10734 struct sched_param schp;
10735
10736 if (arg2 == 0) {
10737 return -TARGET_EINVAL;
10738 }
10739 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10740 goto efault;
10741 schp.sched_priority = tswap32(target_schp->sched_priority);
10742 unlock_user_struct(target_schp, arg2, 0);
10743 ret = get_errno(sched_setparam(arg1, &schp));
10744 }
10745 break;
10746 case TARGET_NR_sched_getparam:
10747 {
10748 struct sched_param *target_schp;
10749 struct sched_param schp;
10750
10751 if (arg2 == 0) {
10752 return -TARGET_EINVAL;
10753 }
10754 ret = get_errno(sched_getparam(arg1, &schp));
10755 if (!is_error(ret)) {
10756 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10757 goto efault;
10758 target_schp->sched_priority = tswap32(schp.sched_priority);
10759 unlock_user_struct(target_schp, arg2, 1);
10760 }
10761 }
10762 break;
10763 case TARGET_NR_sched_setscheduler:
10764 {
10765 struct sched_param *target_schp;
10766 struct sched_param schp;
10767 if (arg3 == 0) {
10768 return -TARGET_EINVAL;
10769 }
10770 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10771 goto efault;
10772 schp.sched_priority = tswap32(target_schp->sched_priority);
10773 unlock_user_struct(target_schp, arg3, 0);
10774 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10775 }
10776 break;
10777 case TARGET_NR_sched_getscheduler:
10778 ret = get_errno(sched_getscheduler(arg1));
10779 break;
10780 case TARGET_NR_sched_yield:
10781 ret = get_errno(sched_yield());
10782 break;
10783 case TARGET_NR_sched_get_priority_max:
10784 ret = get_errno(sched_get_priority_max(arg1));
10785 break;
10786 case TARGET_NR_sched_get_priority_min:
10787 ret = get_errno(sched_get_priority_min(arg1));
10788 break;
10789 case TARGET_NR_sched_rr_get_interval:
10790 {
10791 struct timespec ts;
10792 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10793 if (!is_error(ret)) {
10794 ret = host_to_target_timespec(arg2, &ts);
10795 }
10796 }
10797 break;
10798 case TARGET_NR_nanosleep:
10799 {
10800 struct timespec req, rem;
10801 target_to_host_timespec(&req, arg1);
10802 ret = get_errno(safe_nanosleep(&req, &rem));
10803 if (is_error(ret) && arg2) {
10804 host_to_target_timespec(arg2, &rem);
10805 }
10806 }
10807 break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    case TARGET_NR_prctl:
        /*
         * prctl: most options are plain integers and can be passed straight
         * through; the cases below involve guest pointers or emulated state
         * and need explicit handling.
         */
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            /* arg2 may be NULL: only copy out when a pointer was given. */
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                goto efault;
            }
            break;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            /* Thread names are at most 16 bytes including the NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            break;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            break;
        }
#endif
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT. Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (arm_feature(cpu_env, ARM_FEATURE_SVE)
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = arm_env_get_cpu(env);
                uint32_t vq, old_vq;

                /* ZCR_EL1.LEN holds (vector quanta - 1). */
                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                /* Shrinking the VL must also narrow the live SVE state. */
                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                /* On success the kernel returns the new VL in bytes. */
                ret = vq * 16;
            }
            break;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
                CPUARMState *env = cpu_env;
                ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
            }
            break;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            ret = -TARGET_EINVAL;
            break;
        default:
            /* Most prctl options have no pointer arguments */
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
            break;
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
        break;
#else
        goto unimplemented;
#endif
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some 32-bit ABIs pass the 64-bit offset in an aligned reg pair. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                goto efault;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* 'ret' bytes were read into the buffer; copy them back. */
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                goto efault;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Nothing was written into guest memory, so unlock with len 0. */
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* On success 'ret' is the number of bytes to copy back. */
        unlock_user(p, arg1, ret);
        break;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        /*
         * Capability get/set.  The header is always read and written back
         * (the kernel updates 'version' even on error); the data area is
         * optional and its element count depends on the ABI version.
         */
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                goto efault;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    goto efault;
                }

                if (num == TARGET_NR_capset) {
                    /* Byte-swap the guest capability sets into host order. */
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    /* Copy the queried capability sets back to the guest. */
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            break;
        }
    case TARGET_NR_sigaltstack:
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
        break;

#ifdef CONFIG_SENDFILE
    case TARGET_NR_sendfile:
        {
            off_t *offp = NULL;
            off_t off;
            /* The offset pointer is optional; NULL means "use file pos". */
            if (arg3) {
                ret = get_user_sal(off, arg3);
                if (is_error(ret)) {
                    break;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            /* Write the updated offset back to the guest on success. */
            if (!is_error(ret) && arg3) {
                abi_long ret2 = put_user_sal(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            break;
        }
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
        {
            off_t *offp = NULL;
            off_t off;
            /* Same as sendfile but the guest offset is always 64-bit. */
            if (arg3) {
                ret = get_user_s64(off, arg3);
                if (is_error(ret)) {
                    break;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            if (!is_error(ret) && arg3) {
                abi_long ret2 = put_user_s64(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            break;
        }
#endif
#else
    case TARGET_NR_sendfile:
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
#endif
        goto unimplemented;
#endif

#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* vfork is emulated via do_fork with VFORK|VM semantics. */
        ret = get_errno(do_fork(cpu_env,
                        CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                        0, 0, 0, 0));
        break;
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        /* Resource numbers differ between host and target; remap first. */
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                goto efault;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* 64-bit length split across registers; helper reassembles it. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        /* path() applies the -L sysroot prefix remapping. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* low2high*: widen legacy 16-bit guest IDs to host uid_t/gid_t. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
        break;
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
        break;
11194 case TARGET_NR_getgroups:
11195 {
11196 int gidsetsize = arg1;
11197 target_id *target_grouplist;
11198 gid_t *grouplist;
11199 int i;
11200
11201 grouplist = alloca(gidsetsize * sizeof(gid_t));
11202 ret = get_errno(getgroups(gidsetsize, grouplist));
11203 if (gidsetsize == 0)
11204 break;
11205 if (!is_error(ret)) {
11206 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11207 if (!target_grouplist)
11208 goto efault;
11209 for(i = 0;i < ret; i++)
11210 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11211 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11212 }
11213 }
11214 break;
11215 case TARGET_NR_setgroups:
11216 {
11217 int gidsetsize = arg1;
11218 target_id *target_grouplist;
11219 gid_t *grouplist = NULL;
11220 int i;
11221 if (gidsetsize) {
11222 grouplist = alloca(gidsetsize * sizeof(gid_t));
11223 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11224 if (!target_grouplist) {
11225 ret = -TARGET_EFAULT;
11226 goto fail;
11227 }
11228 for (i = 0; i < gidsetsize; i++) {
11229 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11230 }
11231 unlock_user(target_grouplist, arg2, 0);
11232 }
11233 ret = get_errno(setgroups(gidsetsize, grouplist));
11234 }
11235 break;
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(sys_setresuid(low2highuid(arg1),
                                      low2highuid(arg2),
                                      low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy the three IDs out in the guest's (16/32-bit) width. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid
    /*
     * Set real, effective and saved group IDs (legacy 16-bit ID ABI).
     * Guard fixed: this case was wrapped in '#ifdef TARGET_NR_getresgid'
     * (copy-paste from the case below), so setresgid would be mis-compiled
     * on any target where only one of the two syscall numbers is defined.
     */
    case TARGET_NR_setresgid:
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Copy the three IDs out in the guest's (16/32-bit) width. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_setuid:
        ret = get_errno(sys_setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(sys_setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit ID variant: IDs are passed through without narrowing. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            /* Alpha returns the effective ID in the a4 register as well. */
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            /*
             * Use gid_t for a group ID (was mistakenly declared uid_t;
             * identical layout on Linux, so behavior is unchanged).
             */
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr. */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                        | SWCR_TRAP_ENABLE_DZE
                                        | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                        | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64 (swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

            /* case GSI_IEEE_STATE_AT_SIGNAL:
               -- Not implemented in linux kernel.
               case GSI_UACPROC:
               -- Retrieves current unaligned access state; not much used.
               case GSI_PROC_TYPE:
               -- Retrieves implver information; surely not used.
               case GSI_GET_HWRPB:
               -- Grabs a copy of the HWRPB; surely not used.
            */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    goto efault;
                }
                /* Preserve the dynamic rounding-mode field. */
                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr. */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, orig_fpcr;
                int si_code;

                if (get_user_u64(exc, arg2)) {
                    goto efault;
                }

                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* We only add to the exception status here. */
                fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;

                /* Old exceptions are not signaled. */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                /* If any exceptions set by this call,
                   and are unmasked, send a signal. */
                si_code = 0;
                if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                    si_code = TARGET_FPE_FLTRES;
                }
                if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                    si_code = TARGET_FPE_FLTUND;
                }
                if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                    si_code = TARGET_FPE_FLTOVF;
                }
                if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                    si_code = TARGET_FPE_FLTDIV;
                }
                if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                    si_code = TARGET_FPE_FLTINV;
                }
                if (si_code != 0) {
                    target_siginfo_t info;
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
            }
            break;

        /* case SSI_NVPAIRS:
           -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel
        */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Translate the OSF 'how' constants to the host values. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* OSF sigprocmask returns the old mask as its result. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        break;
#endif
11508
#ifdef TARGET_NR_getgid32
    /* 32-bit ID variants: no 16-bit narrowing/widening is applied. */
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* 32-bit-ID variant of getgroups. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /*
             * Validate the guest-controlled count before using it to size a
             * stack allocation: a negative or huge value would otherwise
             * overflow the stack via alloca().  65536 is NGROUPS_MAX on
             * Linux, so the kernel rejects anything larger with EINVAL too.
             */
            if (gidsetsize < 0 || gidsetsize > 65536) {
                ret = -TARGET_EINVAL;
                break;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size-query mode: nothing to copy back. */
                break;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                /* Only 'ret' entries were filled in by the kernel. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* 32-bit-ID variant of setgroups. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /*
             * Validate the guest-controlled count before using it to size a
             * stack allocation: a negative or huge value would otherwise
             * overflow the stack via alloca().  65536 is NGROUPS_MAX on
             * Linux, so the kernel rejects anything larger with EINVAL too.
             */
            if (gidsetsize < 0 || gidsetsize > 65536) {
                ret = -TARGET_EINVAL;
                break;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(sys_setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy the three IDs out as full 32-bit values. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(sys_setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(sys_setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(sys_setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
11651
11652 case TARGET_NR_pivot_root:
11653 goto unimplemented;
11654 #ifdef TARGET_NR_mincore
11655 case TARGET_NR_mincore:
11656 {
11657 void *a;
11658 ret = -TARGET_ENOMEM;
11659 a = lock_user(VERIFY_READ, arg1, arg2, 0);
11660 if (!a) {
11661 goto fail;
11662 }
11663 ret = -TARGET_EFAULT;
11664 p = lock_user_string(arg3);
11665 if (!p) {
11666 goto mincore_fail;
11667 }
11668 ret = get_errno(mincore(a, arg2, p));
11669 unlock_user(p, arg3, ret);
11670 mincore_fail:
11671 unlock_user(a, arg1, 0);
11672 }
11673 break;
11674 #endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error number directly, not via errno. */
        ret = -host_to_target_errno(ret);
        break;
#endif
11688
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise returns the error number directly, not via errno. */
        ret = -host_to_target_errno(posix_fadvise(arg1,
                                                  target_offset64(arg2, arg3),
                                                  target_offset64(arg4, arg5),
                                                  arg6));
        break;
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = -host_to_target_errno(posix_fadvise(arg1,
                                                  target_offset64(arg2, arg3),
                                                  arg4, arg5));
        break;
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants than the generic ABI. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
        break;
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        ret = get_errno(0);
        break;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        /*
         * fcntl64: the *LK64 commands carry a struct flock64 whose layout
         * (and, on ARM OABI, alignment) differs from the host's, so the
         * struct is converted on the way in and out.
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM lays out flock64 without the 8-byte alignment. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the caller. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            /* All other commands are handled like plain fcntl. */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the emulated target page size, not the host's. */
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* The 64-bit offset may arrive in an aligned register pair. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        /* Note: this 'p' intentionally shadows the outer scratch pointer. */
        void *p, *b = 0;
        /* A NULL buffer is a size query; only lock when one was given. */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    {
        void *p, *n, *v = 0;
        /* The value may legitimately be NULL (empty attribute). */
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_setxattr) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        /* unlock_user is a no-op for the pointers that failed to lock. */
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    break;
    case TARGET_NR_fsetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        /* Copy the retrieved value back to the guest buffer. */
        unlock_user(v, arg3, arg4);
    }
    break;
11961 case TARGET_NR_fgetxattr:
11962 {
11963 void *n, *v = 0;
11964 if (arg3) {
11965 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11966 if (!v) {
11967 ret = -TARGET_EFAULT;
11968 break;
11969 }
11970 }
11971 n = lock_user_string(arg2);
11972 if (n) {
11973 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11974 } else {
11975 ret = -TARGET_EFAULT;
11976 }
11977 unlock_user(n, arg2, 0);
11978 unlock_user(v, arg3, arg4);
11979 }
11980 break;
11981 case TARGET_NR_removexattr:
11982 case TARGET_NR_lremovexattr:
11983 {
11984 void *p, *n;
11985 p = lock_user_string(arg1);
11986 n = lock_user_string(arg2);
11987 if (p && n) {
11988 if (num == TARGET_NR_removexattr) {
11989 ret = get_errno(removexattr(p, n));
11990 } else {
11991 ret = get_errno(lremovexattr(p, n));
11992 }
11993 } else {
11994 ret = -TARGET_EFAULT;
11995 }
11996 unlock_user(p, arg1, 0);
11997 unlock_user(n, arg2, 0);
11998 }
11999 break;
12000 case TARGET_NR_fremovexattr:
12001 {
12002 void *n;
12003 n = lock_user_string(arg2);
12004 if (n) {
12005 ret = get_errno(fremovexattr(arg1, n));
12006 } else {
12007 ret = -TARGET_EFAULT;
12008 }
12009 unlock_user(n, arg2, 0);
12010 }
12011 break;
12012 #endif
12013 #endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* Per-architecture TLS-pointer setup: most targets just stash the
         * value in the appropriate CPU state field; only i386 has a real
         * descriptor-table implementation.
         */
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        /* CRIS requires the low byte of the TLS value to be zero. */
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
    {
        TaskState *ts = cpu->opaque;
        ts->tp_value = arg1;
        ret = 0;
        break;
    }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
    {
        TaskState *ts = cpu->opaque;
        ret = ts->tp_value;
        break;
    }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec first so an EFAULT is reported
         * before touching the host clock. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): the host_to_target_timespec return value is
             * ignored here, so a bad arg2 pointer is not reported as
             * EFAULT — confirm against newer upstream. */
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        /* NOTE(review): return value of target_to_host_timespec() is not
         * checked, so an unreadable arg3 sleeps on garbage instead of
         * returning EFAULT — confirm against newer upstream. */
        target_to_host_timespec(&ts, arg3);
        /* ts doubles as the remaining-time output when arg4 is set. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        break;
    }
#endif
12116
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* Pass the guest address translated to a host pointer; the kernel
         * will clear the futex word there on thread exit. */
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

    case TARGET_NR_tkill:
        /* Signal numbers differ between guest and host ABIs. */
        ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
        break;

    case TARGET_NR_tgkill:
        ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        goto unimplemented_nowarn;
#endif

#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
    {
        struct timespec *tsp, ts[2];
        if (!arg3) {
            /* NULL times means "set to current time". */
            tsp = NULL;
        } else {
            /* NOTE(review): conversion return values are ignored, so a
             * faulting arg3 is not reported as EFAULT — confirm against
             * newer upstream. */
            target_to_host_timespec(ts, arg3);
            target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
            tsp = ts;
        }
        if (!arg2)
            /* NULL path: operate directly on the dirfd (arg1). */
            ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
        else {
            if (!(p = lock_user_string(arg2))) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
            unlock_user(p, arg2, 0);
        }
    }
    break;
#endif
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* Register a translator so inotify events read from this fd
             * are byte-swapped for the guest. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* NOTE(review): p is not checked for NULL before use; an
         * unreadable arg2 should yield EFAULT — confirm upstream. */
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
12208
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
    {
        struct mq_attr posix_mq_attr;
        struct mq_attr *pposix_mq_attr;
        int host_flags;

        host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
        pposix_mq_attr = NULL;
        if (arg4) {
            if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                goto efault;
            }
            pposix_mq_attr = &posix_mq_attr;
        }
        /* NOTE(review): the "arg1 - 1" offset looks intentional but its
         * rationale is not visible here — verify against upstream before
         * touching (mq names start with '/'). */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            goto efault;
        }
        ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
        unlock_user (p, arg1, 0);
    }
    break;

    case TARGET_NR_mq_unlink:
        /* Same "arg1 - 1" offset as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
    {
        struct timespec ts;

        /* NOTE(review): p is not NULL-checked; an unreadable message
         * buffer should give EFAULT — confirm upstream. */
        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user (p, arg2, arg3);
    }
    break;

    case TARGET_NR_mq_timedreceive:
    {
        struct timespec ts;
        unsigned int prio;

        /* NOTE(review): buffer is locked VERIFY_READ although the
         * received message is written into it — confirm upstream. */
        p = lock_user (VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        unlock_user (p, arg2, arg3);
        if (arg4 != 0)
            put_user_u32(prio, arg4);
    }
    break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
    {
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = 0;
        if (arg2 != 0) {
            /* NOTE(review): copy_from_user_mq_attr return ignored; a bad
             * arg2 pointer should give EFAULT — confirm upstream. */
            copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
            /* mq_setattr also reports the old attributes. */
            ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                       &posix_mq_attr_out));
        } else if (arg3 != 0) {
            ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
        }
        if (ret == 0 && arg3 != 0) {
            copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        }
    }
    break;
#endif
12301
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
    {
        /* fd-to-fd duplication: no guest memory involved, pass through. */
        ret = get_errno(tee(arg1,arg2,arg3,arg4));
    }
    break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
    {
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        /* arg2/arg4 are optional guest pointers to 64-bit offsets; copy
         * them in, splice, then copy the updated offsets back. */
        if (arg2) {
            if (get_user_u64(loff_in, arg2)) {
                goto efault;
            }
            ploff_in = &loff_in;
        }
        if (arg4) {
            if (get_user_u64(loff_out, arg4)) {
                goto efault;
            }
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        if (arg2) {
            if (put_user_u64(loff_in, arg2)) {
                goto efault;
            }
        }
        if (arg4) {
            if (put_user_u64(loff_out, arg4)) {
                goto efault;
            }
        }
    }
    break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
    {
        /* Translate the guest iovec array into host iovecs. */
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        if (vec != NULL) {
            ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
            unlock_iovec(vec, arg2, arg3, 0);
        } else {
            /* lock_iovec sets host errno on failure. */
            ret = -host_to_target_errno(errno);
        }
    }
    break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Byte-swap the 64-bit counter on reads/writes of this fd. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Only NONBLOCK and CLOEXEC are valid eventfd2 flags; translate
         * them individually, pass any other bits through unchanged so
         * the host kernel can reject them. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 64-bit offset and length each arrive as a register pair. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a pad slot, shifting the register pairs. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old signalfd is signalfd4 with no flags. */
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* Convert the guest epoll_event (events mask + opaque data)
             * to the host layout; EPOLL_CTL_DEL passes a NULL event. */
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents both to reject bad input and to cap the size
         * of the temporary host array allocated below. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        /* Events land in a host-format scratch array, then get swapped
         * into the guest buffer after the wait. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                /* The sigset size must match the target's exactly. */
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; swap each back out. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        /* Old limits are only fetched when the caller asked for them. */
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Unreadable address: deliver SIGSEGV to the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;

            /* NOTE(review): execution falls through and reads mem_value,
             * which is uninitialized on this path, and ret is then
             * overwritten below — confirm intended behavior upstream. */
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
12624
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Guest timer IDs are indexes into the g_posix_timers table,
         * tagged with TIMER_MAGIC when handed back to the guest. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        /* get_timer_id() validates the TIMER_MAGIC tag and returns a
         * negative errno for a bogus ID. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): copy-out happens even if timer_gettime
             * failed, writing an uninitialized hspec — confirm upstream. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): ret here is an overrun count, not an fd;
         * fd_trans_unregister(ret) looks suspicious — verify upstream. */
        fd_trans_unregister(ret);
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the table slot for reuse by timer_create. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
12745
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        /* Copy the current timer value back to the guest (skipped for a
         * NULL arg2, matching the unchecked host behavior). */
        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
    }
    break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        /* Optionally report the previous timer setting. */
        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
    }
    break;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        ret = do_swapcontext(cpu_env, arg1, arg2, arg3);
        break;
#endif
12823
    default:
    unimplemented:
        /* Syscall the guest requested but QEMU does not emulate. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        /* Deliberately-unsupported syscalls jump here to skip the log. */
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
 fail:
    /* Common exit path: optional strace/trace reporting, then return the
     * (possibly negative-errno) result to the guest. */
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
 efault:
    /* Shared bail-out for guest-memory access failures. */
    ret = -TARGET_EFAULT;
    goto fail;
}