]>
Commit | Line | Data |
---|---|---|
a47d1dfd DH |
1 | /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ |
2 | ||
3 | /*** | |
4 | This file is part of systemd. | |
5 | ||
6 | Copyright 2014 David Herrmann <dh.herrmann@gmail.com> | |
7 | ||
8 | systemd is free software; you can redistribute it and/or modify it | |
9 | under the terms of the GNU Lesser General Public License as published by | |
10 | the Free Software Foundation; either version 2.1 of the License, or | |
11 | (at your option) any later version. | |
12 | ||
13 | systemd is distributed in the hope that it will be useful, but | |
14 | WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | Lesser General Public License for more details. | |
17 | ||
18 | You should have received a copy of the GNU Lesser General Public License | |
19 | along with systemd; If not, see <http://www.gnu.org/licenses/>. | |
20 | ***/ | |
21 | ||
22 | /* | |
23 | * PTY | |
24 | * A PTY object represents a single PTY connection between a master and a | |
25 | * child. The child process is fork()ed so the caller controls what program | |
26 | * will be run. | |
27 | * | |
28 | * Programs like /bin/login tend to perform a vhangup() on their TTY | |
29 | * before running the login procedure. This also causes the pty master | |
30 | * to get a EPOLLHUP event as long as no client has the TTY opened. | |
31 | * This means, we cannot use the TTY connection as reliable way to track | |
32 | * the client. Instead, we _must_ rely on the PID of the client to track | |
33 | * them. | |
34 | * However, this has the side effect that if the client forks and the | |
35 | * parent exits, we loose them and restart the client. But this seems to | |
36 | * be the expected behavior so we implement it here. | |
37 | * | |
38 | * Unfortunately, epoll always polls for EPOLLHUP so as long as the | |
39 | * vhangup() is ongoing, we will _always_ get EPOLLHUP and cannot sleep. | |
40 | * This gets worse if the client closes the TTY but doesn't exit. | |
41 | * Therefore, the fd must be edge-triggered in the epoll-set so we | |
42 | * only get the events once they change. | |
43 | */ | |
44 | ||
45 | #include <errno.h> | |
46 | #include <fcntl.h> | |
a47d1dfd DH |
47 | #include <signal.h> |
48 | #include <stdbool.h> | |
49 | #include <stdint.h> | |
a47d1dfd | 50 | #include <stdlib.h> |
a47d1dfd | 51 | #include <sys/epoll.h> |
a47d1dfd | 52 | #include <sys/ioctl.h> |
a47d1dfd DH |
53 | #include <sys/uio.h> |
54 | #include <sys/wait.h> | |
55 | #include <termios.h> | |
56 | #include <unistd.h> | |
57 | ||
58 | #include "barrier.h" | |
59 | #include "macro.h" | |
a47d1dfd DH |
60 | #include "ring.h" |
61 | #include "util.h" | |
24882e06 LP |
62 | #include "signal-util.h" |
63 | #include "pty.h" | |
a47d1dfd | 64 | |
48fed5c5 | 65 | #define PTY_BUFSIZE 4096 |
a47d1dfd DH |
66 | |
/* Role of a Pty object. A freshly created Pty is UNKNOWN; it becomes
 * PARENT or CHILD via pty_make_parent()/pty_make_child(), usually from
 * within pty_fork(). The role is never changed back. */
enum {
        PTY_ROLE_UNKNOWN,
        PTY_ROLE_PARENT,
        PTY_ROLE_CHILD,
};
72 | ||
struct Pty {
        unsigned long ref;              /* reference count; object is freed when it drops to 0 */
        Barrier barrier;                /* parent/child synchronization during pty_fork() */
        int fd;                         /* pty master (parent) or slave (child); -1 when closed */
        pid_t child;                    /* PID of the forked child, 0 if none/reaped */
        sd_event_source *fd_source;     /* edge-triggered IO source watching @fd */
        sd_event_source *child_source;  /* child-exit source watching @child */

        char in_buf[PTY_BUFSIZE];       /* scratch buffer for reads from the master */
        Ring out_buf;                   /* queued data not yet written to the master */

        pty_event_t event_fn;           /* user callback for PTY_DATA/PTY_HUP/PTY_CHILD events */
        void *event_fn_userdata;        /* opaque pointer handed back to @event_fn */

        bool needs_requeue : 1;         /* edge-triggered fd must be re-armed before sleeping */
        unsigned int role : 2;          /* one of PTY_ROLE_* */
};
90 | ||
/*
 * Allocate a new PTY object with an open pty master and role UNKNOWN.
 * On success, returns 0 and stores the new object (ref-count 1) in @out;
 * on failure, returns a negative errno-style code. The slave side is
 * still locked; call pty_unlock() before opening it.
 */
int pty_new(Pty **out) {
        _pty_unref_ Pty *pty = NULL;    /* auto-unref on early return */
        int r;

        assert_return(out, -EINVAL);

        pty = new0(Pty, 1);
        if (!pty)
                return -ENOMEM;

        pty->ref = 1;
        pty->fd = -1;
        pty->barrier = (Barrier) BARRIER_NULL;

        /* O_NONBLOCK: the master is later polled edge-triggered, so reads
         * and writes must never block the event loop. */
        pty->fd = posix_openpt(O_RDWR | O_NOCTTY | O_CLOEXEC | O_NONBLOCK);
        if (pty->fd < 0)
                return -errno;

        /*
         * The slave-node is initialized to uid/gid of the caller of
         * posix_openpt(). Only if devpts is mounted with fixed uid/gid this is
         * skipped. In that case, grantpt() can overwrite these, but then you
         * have to be root to use chown() (or a pt_chown helper has to be
         * present). In those cases grantpt() really does something,
         * otherwise it's a no-op. We call grantpt() here to try supporting
         * those cases, even though no-one uses that, I guess. If you need other
         * access-rights, set them yourself after this call returns (no, this is
         * not racy, it looks racy, but races regarding your own UID are never
         * important as an attacker could ptrace you; and the slave-pty is also
         * still locked).
         */
        r = grantpt(pty->fd);
        if (r < 0)
                return -errno;

        r = barrier_create(&pty->barrier);
        if (r < 0)
                return r;

        /* transfer ownership to the caller; disarm the cleanup handler */
        *out = pty;
        pty = NULL;
        return 0;
}
134 | ||
135 | Pty *pty_ref(Pty *pty) { | |
136 | if (!pty || pty->ref < 1) | |
137 | return NULL; | |
138 | ||
139 | ++pty->ref; | |
140 | return pty; | |
141 | } | |
142 | ||
143 | Pty *pty_unref(Pty *pty) { | |
144 | if (!pty || pty->ref < 1 || --pty->ref > 0) | |
145 | return NULL; | |
146 | ||
147 | pty_close(pty); | |
148 | pty->child_source = sd_event_source_unref(pty->child_source); | |
149 | barrier_destroy(&pty->barrier); | |
150 | ring_clear(&pty->out_buf); | |
151 | free(pty); | |
152 | ||
153 | return NULL; | |
154 | } | |
155 | ||
156 | Barrier *pty_get_barrier(Pty *pty) { | |
157 | assert(pty); | |
158 | return &pty->barrier; | |
159 | } | |
160 | ||
161 | bool pty_is_unknown(Pty *pty) { | |
162 | return pty && pty->role == PTY_ROLE_UNKNOWN; | |
163 | } | |
164 | ||
165 | bool pty_is_parent(Pty *pty) { | |
166 | return pty && pty->role == PTY_ROLE_PARENT; | |
167 | } | |
168 | ||
169 | bool pty_is_child(Pty *pty) { | |
170 | return pty && pty->role == PTY_ROLE_CHILD; | |
171 | } | |
172 | ||
173 | bool pty_has_child(Pty *pty) { | |
174 | return pty_is_parent(pty) && pty->child > 0; | |
175 | } | |
176 | ||
177 | pid_t pty_get_child(Pty *pty) { | |
178 | return pty_has_child(pty) ? pty->child : -ECHILD; | |
179 | } | |
180 | ||
181 | bool pty_is_open(Pty *pty) { | |
182 | return pty && pty->fd >= 0; | |
183 | } | |
184 | ||
185 | int pty_get_fd(Pty *pty) { | |
186 | assert_return(pty, -EINVAL); | |
187 | ||
188 | return pty_is_open(pty) ? pty->fd : -EPIPE; | |
189 | } | |
190 | ||
191 | int pty_make_child(Pty *pty) { | |
611b312b | 192 | _cleanup_free_ char *slave_name = NULL; |
a47d1dfd DH |
193 | int r, fd; |
194 | ||
195 | assert_return(pty, -EINVAL); | |
196 | assert_return(pty_is_unknown(pty), -EALREADY); | |
197 | ||
611b312b | 198 | r = ptsname_malloc(pty->fd, &slave_name); |
a47d1dfd DH |
199 | if (r < 0) |
200 | return -errno; | |
201 | ||
202 | fd = open(slave_name, O_RDWR | O_CLOEXEC | O_NOCTTY); | |
203 | if (fd < 0) | |
204 | return -errno; | |
205 | ||
206 | safe_close(pty->fd); | |
207 | pty->fd = fd; | |
208 | pty->child = getpid(); | |
209 | pty->role = PTY_ROLE_CHILD; | |
210 | barrier_set_role(&pty->barrier, BARRIER_CHILD); | |
211 | ||
212 | return 0; | |
213 | } | |
214 | ||
215 | int pty_make_parent(Pty *pty, pid_t child) { | |
216 | assert_return(pty, -EINVAL); | |
217 | assert_return(pty_is_unknown(pty), -EALREADY); | |
218 | ||
219 | pty->child = child; | |
220 | pty->role = PTY_ROLE_PARENT; | |
221 | ||
222 | return 0; | |
223 | } | |
224 | ||
225 | int pty_unlock(Pty *pty) { | |
226 | assert_return(pty, -EINVAL); | |
227 | assert_return(pty_is_unknown(pty) || pty_is_parent(pty), -EINVAL); | |
228 | assert_return(pty_is_open(pty), -ENODEV); | |
229 | ||
230 | return unlockpt(pty->fd) < 0 ? -errno : 0; | |
231 | } | |
232 | ||
/*
 * Prepare the forked child for running a payload on the pty slave:
 * reset signal state, start a new session, make the slave the controlling
 * TTY, sanitize termios, and wire the slave to stdin/stdout/stderr.
 * Must be called after pty_make_child(). Returns 0 on success, a negative
 * errno-style code on failure.
 */
int pty_setup_child(Pty *pty) {
        struct termios attr;
        pid_t pid;
        int r;

        assert_return(pty, -EINVAL);
        assert_return(pty_is_child(pty), -EINVAL);
        assert_return(pty_is_open(pty), -EALREADY);

        /* unblock all signals inherited from the parent */
        r = sigprocmask_many(SIG_SETMASK, -1);
        if (r < 0)
                return r;

        r = reset_all_signal_handlers();
        if (r < 0)
                return r;

        /* become session leader; EPERM means we already are one, which is fine */
        pid = setsid();
        if (pid < 0 && errno != EPERM)
                return -errno;

        /* acquire the slave as our controlling TTY (requires session leadership) */
        r = ioctl(pty->fd, TIOCSCTTY, 0);
        if (r < 0)
                return -errno;

        r = tcgetattr(pty->fd, &attr);
        if (r < 0)
                return -errno;

        /* erase character should be normal backspace, PLEASEEE! */
        attr.c_cc[VERASE] = 010;
        /* always set UTF8 flag */
        attr.c_iflag |= IUTF8;

        r = tcsetattr(pty->fd, TCSANOW, &attr);
        if (r < 0)
                return -errno;

        /* route std streams through the slave; a payload exec()ed next
         * inherits these */
        if (dup2(pty->fd, STDIN_FILENO) != STDIN_FILENO ||
            dup2(pty->fd, STDOUT_FILENO) != STDOUT_FILENO ||
            dup2(pty->fd, STDERR_FILENO) != STDERR_FILENO)
                return -errno;

        /* only close FD if it's not a std-fd */
        pty->fd = (pty->fd > 2) ? safe_close(pty->fd) : -1;

        return 0;
}
281 | ||
282 | void pty_close(Pty *pty) { | |
283 | if (!pty_is_open(pty)) | |
284 | return; | |
285 | ||
286 | pty->fd_source = sd_event_source_unref(pty->fd_source); | |
287 | pty->fd = safe_close(pty->fd); | |
288 | } | |
289 | ||
290 | /* | |
291 | * Drain input-queue and dispatch data via the event-handler. Returns <0 on | |
292 | * error, 0 if queue is empty and 1 if we couldn't empty the input queue fast | |
293 | * enough and there's still data left. | |
294 | */ | |
static int pty_dispatch_read(Pty *pty) {
        unsigned int i;
        ssize_t len;
        int r;

        /*
         * We're edge-triggered, means we need to read the whole queue. This,
         * however, might cause us to stall if the writer is faster than we
         * are. Therefore, try reading as much as 8 times (32KiB) and only
         * bail out then.
         */

        for (i = 0; i < 8; ++i) {
                /* reserve one byte for the terminating zero below */
                len = read(pty->fd, pty->in_buf, sizeof(pty->in_buf) - 1);
                if (len < 0) {
                        if (errno == EINTR)
                                continue;

                        /* EAGAIN: queue fully drained, report "empty" */
                        return (errno == EAGAIN) ? 0 : -errno;
                } else if (len == 0) {
                        continue;
                }

                /* set terminating zero for debugging safety */
                pty->in_buf[len] = 0;
                /* hand the chunk to the user callback; abort on its error */
                r = pty->event_fn(pty, pty->event_fn_userdata, PTY_DATA, pty->in_buf, len);
                if (r < 0)
                        return r;
        }

        /* still data left, make sure we're queued again */
        pty->needs_requeue = true;

        return 1;
}
330 | ||
331 | /* | |
332 | * Drain output-queue by writing data to the pty. Returns <0 on error, 0 if the | |
333 | * output queue is empty now and 1 if we couldn't empty the output queue fast | |
334 | * enough and there's still data left. | |
335 | */ | |
static int pty_dispatch_write(Pty *pty) {
        struct iovec vec[2];    /* ring buffer may wrap, hence up to 2 segments */
        unsigned int i;
        ssize_t len;
        size_t num;

        /*
         * Same as pty_dispatch_read(), we're edge-triggered so we need to call
         * write() until either all data is written or it returns EAGAIN. We
         * call it twice and if it still writes successfully, we reschedule.
         */

        for (i = 0; i < 2; ++i) {
                num = ring_peek(&pty->out_buf, vec);
                if (num < 1)
                        /* nothing queued, output fully drained */
                        return 0;

                len = writev(pty->fd, vec, (int)num);
                if (len < 0) {
                        if (errno == EINTR)
                                continue;

                        /* EAGAIN: kernel queue full, data remains pending */
                        return (errno == EAGAIN) ? 1 : -errno;
                } else if (len == 0) {
                        continue;
                }

                /* discard the bytes the kernel accepted */
                ring_pull(&pty->out_buf, (size_t)len);
        }

        /* still data left, make sure we're queued again */
        if (ring_get_size(&pty->out_buf) > 0) {
                pty->needs_requeue = true;
                return 1;
        }

        return 0;
}
374 | ||
/* IO dispatch callback for the pty master fd. Returns 0 normally, or a
 * negative user-callback error to abort the event loop iteration. */
static int pty_fd_fn(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        Pty *pty = userdata;
        int r_hup = 0, r_write = 0, r_read = 0, r;

        /*
         * Whenever we encounter I/O errors, we have to make sure to drain the
         * input queue first, before we handle any HUP. A child might send us
         * a message and immediately close the queue. We must not handle the
         * HUP first or we loose data.
         * Therefore, if we read a message successfully, we always return
         * success and wait for the next event-loop iteration. Furthermore,
         * whenever there is a write-error, we must try reading from the input
         * queue even if EPOLLIN is not set. The input might have arrived in
         * between epoll_wait() and write(). Therefore, write-errors are only
         * ever handled if the input-queue is empty. In all other cases they
         * are ignored until either reading fails or the input queue is empty.
         */

        if (revents & (EPOLLHUP | EPOLLERR))
                r_hup = -EPIPE;

        if (revents & EPOLLOUT)
                r_write = pty_dispatch_write(pty);

        /* Awesome! Kernel signals HUP without IN but queues are not empty.. */
        if ((revents & EPOLLIN) || r_hup < 0 || r_write < 0) {
                r_read = pty_dispatch_read(pty);
                if (r_read > 0)
                        return 0; /* still data left to fetch next round */
        }

        if (r_hup < 0 || r_write < 0 || r_read < 0) {
                /* PTY closed and input-queue drained */
                pty_close(pty);
                r = pty->event_fn(pty, pty->event_fn_userdata, PTY_HUP, NULL, 0);
                if (r < 0)
                        return r;
        }

        return 0;
}
416 | ||
/* Prepare callback run before each event-loop sleep: re-arm the
 * edge-triggered fd if a dispatcher left work pending (needs_requeue). */
static int pty_fd_prepare_fn(sd_event_source *source, void *userdata) {
        Pty *pty = userdata;
        int r;

        if (pty->needs_requeue) {
                /*
                 * We're edge-triggered. In case we couldn't handle all events
                 * or in case new write-data is queued, we set needs_requeue.
                 * Before going asleep, we set the io-events *again*. sd-event
                 * notices that we're edge-triggered and forwards the call to
                 * the kernel even if the events didn't change. The kernel will
                 * check the events and re-queue us on the ready queue in case
                 * an event is pending.
                 */
                r = sd_event_source_set_io_events(source, EPOLLHUP | EPOLLERR | EPOLLIN | EPOLLOUT | EPOLLET);
                if (r >= 0)
                        pty->needs_requeue = false;
        }

        return 0;
}
438 | ||
439 | static int pty_child_fn(sd_event_source *source, const siginfo_t *si, void *userdata) { | |
440 | Pty *pty = userdata; | |
441 | int r; | |
442 | ||
443 | pty->child = 0; | |
444 | ||
445 | r = pty->event_fn(pty, pty->event_fn_userdata, PTY_CHILD, si, sizeof(*si)); | |
446 | if (r < 0) | |
447 | return r; | |
448 | ||
449 | return 0; | |
450 | } | |
451 | ||
/*
 * Attach a parent-side Pty to an event loop: watch the master fd
 * edge-triggered (with a prepare hook to re-arm it) and watch the child
 * for exit. @event_fn receives PTY_DATA/PTY_HUP/PTY_CHILD events with
 * @event_fn_userdata. Any previously attached event loop is detached
 * first; on error everything is detached again. Returns 0 on success,
 * negative errno-style code on failure.
 */
int pty_attach_event(Pty *pty, sd_event *event, pty_event_t event_fn, void *event_fn_userdata) {
        int r;

        assert_return(pty, -EINVAL);
        assert_return(event, -EINVAL);
        assert_return(event_fn, -EINVAL);
        assert_return(pty_is_parent(pty), -EINVAL);

        pty_detach_event(pty);

        if (pty_is_open(pty)) {
                /* EPOLLET: see the file-header comment on why the fd must be
                 * edge-triggered */
                r = sd_event_add_io(event,
                                    &pty->fd_source,
                                    pty->fd,
                                    EPOLLHUP | EPOLLERR | EPOLLIN | EPOLLOUT | EPOLLET,
                                    pty_fd_fn,
                                    pty);
                if (r < 0)
                        goto error;

                r = sd_event_source_set_prepare(pty->fd_source, pty_fd_prepare_fn);
                if (r < 0)
                        goto error;
        }

        if (pty_has_child(pty)) {
                r = sd_event_add_child(event,
                                       &pty->child_source,
                                       pty->child,
                                       WEXITED,
                                       pty_child_fn,
                                       pty);
                if (r < 0)
                        goto error;
        }

        pty->event_fn = event_fn;
        pty->event_fn_userdata = event_fn_userdata;

        return 0;

error:
        pty_detach_event(pty);
        return r;
}
497 | ||
498 | void pty_detach_event(Pty *pty) { | |
499 | if (!pty) | |
500 | return; | |
501 | ||
502 | pty->child_source = sd_event_source_unref(pty->child_source); | |
503 | pty->fd_source = sd_event_source_unref(pty->fd_source); | |
504 | pty->event_fn = NULL; | |
505 | pty->event_fn_userdata = NULL; | |
506 | } | |
507 | ||
508 | int pty_write(Pty *pty, const void *buf, size_t size) { | |
509 | bool was_empty; | |
510 | int r; | |
511 | ||
512 | assert_return(pty, -EINVAL); | |
513 | assert_return(pty_is_open(pty), -ENODEV); | |
514 | assert_return(pty_is_parent(pty), -ENODEV); | |
515 | ||
516 | if (size < 1) | |
517 | return 0; | |
518 | ||
519 | /* | |
520 | * Push @buf[0..@size] into the output ring-buffer. In case the | |
521 | * ring-buffer wasn't empty beforehand, we're already waiting for | |
522 | * EPOLLOUT and we're done. If it was empty, we have to re-queue the | |
523 | * FD for EPOLLOUT as we're edge-triggered and wouldn't get any new | |
524 | * EPOLLOUT event. | |
525 | */ | |
526 | ||
527 | was_empty = ring_get_size(&pty->out_buf) < 1; | |
528 | ||
529 | r = ring_push(&pty->out_buf, buf, size); | |
530 | if (r < 0) | |
531 | return r; | |
532 | ||
533 | if (was_empty) | |
534 | pty->needs_requeue = true; | |
535 | ||
536 | return 0; | |
537 | } | |
538 | ||
539 | int pty_signal(Pty *pty, int sig) { | |
540 | assert_return(pty, -EINVAL); | |
541 | assert_return(pty_is_open(pty), -ENODEV); | |
542 | assert_return(pty_is_parent(pty), -ENODEV); | |
543 | ||
544 | return ioctl(pty->fd, TIOCSIG, sig) < 0 ? -errno : 0; | |
545 | } | |
546 | ||
547 | int pty_resize(Pty *pty, unsigned short term_width, unsigned short term_height) { | |
aa0fff7f LP |
548 | struct winsize ws = { |
549 | .ws_col = term_width, | |
550 | .ws_row = term_height, | |
551 | }; | |
a47d1dfd DH |
552 | |
553 | assert_return(pty, -EINVAL); | |
554 | assert_return(pty_is_open(pty), -ENODEV); | |
555 | assert_return(pty_is_parent(pty), -ENODEV); | |
556 | ||
a47d1dfd DH |
557 | /* |
558 | * This will send SIGWINCH to the pty slave foreground process group. | |
559 | * We will also get one, but we don't need it. | |
560 | */ | |
561 | return ioctl(pty->fd, TIOCSWINSZ, &ws) < 0 ? -errno : 0; | |
562 | } | |
563 | ||
/*
 * Create a new Pty, fork, and hand each side its configured Pty object.
 * Returns the child's PID (in the parent) or 0 (in the child) with *out
 * set; returns a negative errno-style code on failure. The child is fully
 * set up on its pty slave (see pty_setup_child()) and synchronized with
 * the parent via the barrier, so the parent's event sources are in place
 * before the child proceeds. @event/@event_fn must be given together or
 * not at all.
 */
pid_t pty_fork(Pty **out, sd_event *event, pty_event_t event_fn, void *event_fn_userdata, unsigned short initial_term_width, unsigned short initial_term_height) {
        _pty_unref_ Pty *pty = NULL;
        int r;
        pid_t pid;

        assert_return(out, -EINVAL);
        assert_return((event && event_fn) || (!event && !event_fn), -EINVAL);

        r = pty_new(&pty);
        if (r < 0)
                return r;

        /* unlock the slave before forking so the child can open it */
        r = pty_unlock(pty);
        if (r < 0)
                return r;

        pid = fork();
        if (pid < 0)
                return -errno;

        if (pid == 0) {
                /* child */

                r = pty_make_child(pty);
                if (r < 0)
                        _exit(-r);

                r = pty_setup_child(pty);
                if (r < 0)
                        _exit(-r);

                /* sync with parent */
                if (!barrier_place_and_sync(&pty->barrier))
                        _exit(1);

                /* fallthrough and return the child's PTY object */
        } else {
                /* parent */

                r = pty_make_parent(pty, pid);
                if (r < 0)
                        goto parent_error;

                r = pty_resize(pty, initial_term_width, initial_term_height);
                if (r < 0)
                        goto parent_error;

                if (event) {
                        r = pty_attach_event(pty, event, event_fn, event_fn_userdata);
                        if (r < 0)
                                goto parent_error;
                }

                /* sync with child */
                if (!barrier_place_and_sync(&pty->barrier)) {
                        r = -ECHILD;
                        goto parent_error;
                }

                /* fallthrough and return the parent's PTY object */
        }

        *out = pty;
        pty = NULL;
        return pid;

parent_error:
        /* abort the handshake so the child _exit()s, then reap it */
        barrier_abort(&pty->barrier);
        waitpid(pty->child, NULL, 0);
        pty->child = 0;
        return r;
}