// <experimental/io_context> -*- C++ -*-

// Copyright (C) 2015-2024 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file experimental/io_context
 *  This is a TS C++ Library header.
 *  @ingroup networking-ts
 */
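
// A minimal usage sketch (illustrative only; it assumes <experimental/timer>
// has also been included, to get the steady_timer typedef):
//
//   std::experimental::net::io_context ctx;
//   std::experimental::net::steady_timer timer(ctx, std::chrono::seconds(1));
//   timer.async_wait([](std::error_code ec) { /* expired or cancelled */ });
//   ctx.run(); // block, running handlers, until no outstanding work remains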

#ifndef _GLIBCXX_EXPERIMENTAL_IO_SERVICE
#define _GLIBCXX_EXPERIMENTAL_IO_SERVICE 1

#pragma GCC system_header

#include <bits/requires_hosted.h> // experimental is currently omitted

#if __cplusplus >= 201402L

#include <algorithm>    // std::find, std::lower_bound, std::stable_sort
#include <atomic>
#include <cstdint>      // uint64_t
#include <forward_list>
#include <functional>
#include <limits>       // std::numeric_limits
#include <memory>       // std::unique_ptr, std::make_unique
#include <queue>        // std::queue, std::priority_queue
#include <system_error>
#include <thread>
#include <vector>
#include <experimental/netfwd>
#include <experimental/executor>
#include <bits/chrono.h>
#if _GLIBCXX_HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef _GLIBCXX_HAVE_POLL_H
# include <poll.h>
#endif
#ifdef _GLIBCXX_HAVE_FCNTL_H
# include <fcntl.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace experimental
{
namespace net
{
inline namespace v1
{

  /** @addtogroup networking-ts
   *  @{
   */

  class __socket_impl;

  /// An ExecutionContext for I/O operations.
  class io_context : public execution_context
  {
  public:
    // types:

    /// An executor for an io_context.
    class executor_type
    {
    public:
      // construct / copy / destroy:

      executor_type(const executor_type& __other) noexcept = default;
      executor_type(executor_type&& __other) noexcept = default;

      executor_type& operator=(const executor_type& __other) noexcept = default;
      executor_type& operator=(executor_type&& __other) noexcept = default;

      // executor operations:

      bool running_in_this_thread() const noexcept
      {
#ifdef _GLIBCXX_HAS_GTHREADS
        lock_guard<execution_context::mutex_type> __lock(_M_ctx->_M_mtx);
        auto __end = _M_ctx->_M_call_stack.end();
        return std::find(_M_ctx->_M_call_stack.begin(), __end,
                         this_thread::get_id()) != __end;
#else
        return _M_ctx->_M_run_count != 0;
#endif
      }

      io_context& context() const noexcept { return *_M_ctx; }

      void on_work_started() const noexcept { ++_M_ctx->_M_work_count; }
      void on_work_finished() const noexcept { --_M_ctx->_M_work_count; }
      template<typename _Func, typename _ProtoAllocator>
        void
        dispatch(_Func&& __f, const _ProtoAllocator& __a) const
        {
          if (running_in_this_thread())
            decay_t<_Func>{std::forward<_Func>(__f)}();
          else
            post(std::forward<_Func>(__f), __a);
        }

      template<typename _Func, typename _ProtoAllocator>
        void
        post(_Func&& __f, const _ProtoAllocator& __a) const
        {
          lock_guard<execution_context::mutex_type> __lock(_M_ctx->_M_mtx);
          // TODO (re-use functionality in system_context)
          _M_ctx->_M_reactor._M_notify();
        }

      template<typename _Func, typename _ProtoAllocator>
        void
        defer(_Func&& __f, const _ProtoAllocator& __a) const
        { post(std::forward<_Func>(__f), __a); }

    private:
      friend io_context;

      explicit
      executor_type(io_context& __ctx) : _M_ctx(std::addressof(__ctx)) { }

      io_context* _M_ctx;
    };

    using count_type = size_t;

    // construct / copy / destroy:

    io_context() : _M_work_count(0) { }

    explicit
    io_context(int __concurrency_hint) : _M_work_count(0) { }

    io_context(const io_context&) = delete;
    io_context& operator=(const io_context&) = delete;

    // io_context operations:

    executor_type get_executor() noexcept { return executor_type(*this); }

    count_type
    run()
    {
      count_type __n = 0;
      while (run_one())
        if (__n != numeric_limits<count_type>::max())
          ++__n;
      return __n;
    }

    template<typename _Rep, typename _Period>
      count_type
      run_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        count_type __n = 0;
        while (run_one_until(__abs_time))
          if (__n != numeric_limits<count_type>::max())
            ++__n;
        return __n;
      }

    count_type
    run_one()
    { return _M_do_one(chrono::milliseconds{-1}); }

    template<typename _Rep, typename _Period>
      count_type
      run_one_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_one_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_one_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        auto __now = _Clock::now();
        while (__now < __abs_time)
          {
            using namespace std::chrono;
            auto __ms = duration_cast<milliseconds>(__abs_time - __now);
            if (_M_do_one(__ms))
              return 1;
            __now = _Clock::now();
          }
        return 0;
      }

    count_type
    poll()
    {
      count_type __n = 0;
      while (poll_one())
        if (__n != numeric_limits<count_type>::max())
          ++__n;
      return __n;
    }

    count_type
    poll_one()
    { return _M_do_one(chrono::milliseconds{0}); }

    void stop()
    {
      lock_guard<execution_context::mutex_type> __lock(_M_mtx);
      _M_stopped = true;
      _M_reactor._M_notify();
    }

    bool stopped() const noexcept
    {
      lock_guard<execution_context::mutex_type> __lock(_M_mtx);
      return _M_stopped;
    }

    void restart()
    {
      _M_stopped = false;
    }

  private:

    template<typename _Clock, typename _WaitTraits>
      friend class basic_waitable_timer;

    friend __socket_impl;

    template<typename _Protocol>
      friend class __basic_socket_impl;

    template<typename _Protocol>
      friend class basic_socket;

    template<typename _Protocol>
      friend class basic_datagram_socket;

    template<typename _Protocol>
      friend class basic_stream_socket;

    template<typename _Protocol>
      friend class basic_socket_acceptor;

    count_type
    _M_outstanding_work() const
    { return _M_work_count + !_M_ops.empty(); }

    struct __timer_queue_base : execution_context::service
    {
      // return milliseconds until next timer expires, or milliseconds::max()
      virtual chrono::milliseconds _M_next() const = 0;
      virtual bool run_one() = 0;

    protected:
      explicit
      __timer_queue_base(execution_context& __ctx) : service(__ctx)
      {
        auto& __ioc = static_cast<io_context&>(__ctx);
        lock_guard<execution_context::mutex_type> __lock(__ioc._M_mtx);
        __ioc._M_timers.push_back(this);
      }

      mutable execution_context::mutex_type _M_qmtx;
    };

    template<typename _Timer, typename _Key = typename _Timer::_Key>
      struct __timer_queue : __timer_queue_base
      {
        using key_type = __timer_queue;

        explicit
        __timer_queue(execution_context& __ctx) : __timer_queue_base(__ctx)
        { }

        void shutdown() noexcept { }

        io_context& context() noexcept
        { return static_cast<io_context&>(service::context()); }

        // Start an asynchronous wait.
        void
        push(const _Timer& __t, function<void(error_code)> __h)
        {
          context().get_executor().on_work_started();
          lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
          _M_queue.emplace(__t, _M_next_id++, std::move(__h));
          // no need to notify reactor unless this timer went to the front?
        }

        // Cancel all outstanding waits for __t
        size_t
        cancel(const _Timer& __t)
        {
          lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
          size_t __count = 0;
          auto __last = _M_queue.end();
          for (auto __it = _M_queue.begin(), __end = __last; __it != __end;
               ++__it)
            {
              if (__it->_M_key == __t._M_key.get())
                {
                  __it->cancel();
                  __last = __it;
                  ++__count;
                }
            }
          if (__count)
            _M_queue._M_sort_to(__last);
          return __count;
        }

        // Cancel oldest outstanding wait for __t
        bool
        cancel_one(const _Timer& __t)
        {
          lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
          const auto __end = _M_queue.end();
          auto __oldest = __end;
          for (auto __it = _M_queue.begin(); __it != __end; ++__it)
            if (__it->_M_key == __t._M_key.get())
              if (__oldest == __end || __it->_M_id < __oldest->_M_id)
                __oldest = __it;
          if (__oldest == __end)
            return false;
          __oldest->cancel();
          _M_queue._M_sort_to(__oldest);
          return true;
        }

        chrono::milliseconds
        _M_next() const override
        {
          typename _Timer::time_point __exp;
          {
            lock_guard<execution_context::mutex_type> __lock(_M_qmtx);
            if (_M_queue.empty())
              return chrono::milliseconds::max(); // no pending timers
            if (_M_queue.top()._M_key == nullptr)
              return chrono::milliseconds::zero(); // cancelled, run now
            __exp = _M_queue.top()._M_expiry;
          }
          auto __dur = _Timer::traits_type::to_wait_duration(__exp);
          if (__dur < __dur.zero())
            __dur = __dur.zero();
          return chrono::duration_cast<chrono::milliseconds>(__dur);
        }

      private:

        bool run_one() override
        {
          auto __now = _Timer::clock_type::now();
          function<void(error_code)> __h;
          error_code __ec;
          {
            lock_guard<execution_context::mutex_type> __lock(_M_qmtx);

            if (_M_queue.top()._M_key == nullptr) // cancelled
              {
                __h = std::move(_M_queue.top()._M_h);
                __ec = std::make_error_code(errc::operation_canceled);
                _M_queue.pop();
              }
            else if (_M_queue.top()._M_expiry <= __now)
              {
                __h = std::move(_M_queue.top()._M_h);
                _M_queue.pop();
              }
          }
          if (__h)
            {
              __h(__ec);
              context().get_executor().on_work_finished();
              return true;
            }
          return false;
        }

        using __timer_id_type = uint64_t;

        struct __pending_timer
        {
          __pending_timer(const _Timer& __t, uint64_t __id,
                          function<void(error_code)> __h)
          : _M_expiry(__t.expiry()), _M_key(__t._M_key.get()), _M_id(__id),
            _M_h(std::move(__h))
          { }

          typename _Timer::time_point _M_expiry;
          _Key* _M_key;
          __timer_id_type _M_id;
          function<void(error_code)> _M_h;

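          // Mark as cancelled: a null key is how _M_next() and run_one()
          // recognise a cancelled wait, and the minimum expiry moves this
          // entry to the front of the queue so it is processed promptly.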
          void cancel() { _M_expiry = _M_expiry.min(); _M_key = nullptr; }

          bool
          operator<(const __pending_timer& __rhs) const
          { return _M_expiry < __rhs._M_expiry; }
        };

        struct __queue : priority_queue<__pending_timer>
        {
          using iterator =
            typename priority_queue<__pending_timer>::container_type::iterator;

          // expose begin/end/erase for direct access to underlying container
          iterator begin() { return this->c.begin(); }
          iterator end() { return this->c.end(); }
          iterator erase(iterator __it) { return this->c.erase(__it); }

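          // Re-sort the underlying container up to and including __it,
          // after elements in that prefix were mutated in place by cancel().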
          void
          _M_sort_to(iterator __it)
          { std::stable_sort(this->c.begin(), ++__it); }
        };

        __queue _M_queue;
        __timer_id_type _M_next_id = 0;
      };

    template<typename _Timer, typename _CompletionHandler>
      void
      async_wait(const _Timer& __timer, _CompletionHandler&& __h)
      {
        auto& __queue = use_service<__timer_queue<_Timer>>(*this);
        __queue.push(__timer, std::move(__h));
        _M_reactor._M_notify();
      }

    // Cancel all wait operations initiated by __timer.
    template<typename _Timer>
      size_t
      cancel(const _Timer& __timer)
      {
        if (!has_service<__timer_queue<_Timer>>(*this))
          return 0;

        auto __c = use_service<__timer_queue<_Timer>>(*this).cancel(__timer);
        if (__c != 0)
          _M_reactor._M_notify();
        return __c;
      }

    // Cancel the oldest wait operation initiated by __timer.
    template<typename _Timer>
      size_t
      cancel_one(const _Timer& __timer)
      {
        if (!has_service<__timer_queue<_Timer>>(*this))
          return 0;

        if (use_service<__timer_queue<_Timer>>(*this).cancel_one(__timer))
          {
            _M_reactor._M_notify();
            return 1;
          }
        return 0;
      }

    // The caller must know how the wait-type __w will be interpreted.
    // In the current implementation the reactor is based on <poll.h>
    // so the parameter must be one of POLLIN, POLLOUT or POLLERR.
    template<typename _Op>
      void
      async_wait(int __fd, int __w, _Op&& __op)
      {
        lock_guard<execution_context::mutex_type> __lock(_M_mtx);
        // TODO need push_back, use std::list not std::forward_list
        auto __tail = _M_ops.before_begin(), __it = _M_ops.begin();
        while (__it != _M_ops.end())
          {
            ++__it;
            ++__tail;
          }
        using __type = __async_operation_impl<_Op>;
        _M_ops.emplace_after(__tail,
                             make_unique<__type>(std::move(__op), __fd, __w));
        _M_reactor._M_fd_interest(__fd, __w);
      }

    void _M_add_fd(int __fd) { _M_reactor._M_add_fd(__fd); }
    void _M_remove_fd(int __fd) { _M_reactor._M_remove_fd(__fd); }

    void cancel(int __fd, error_code&)
    {
      lock_guard<execution_context::mutex_type> __lock(_M_mtx);
      const auto __end = _M_ops.end();
      auto __it = _M_ops.begin();
      auto __prev = _M_ops.before_begin();
      while (__it != __end && (*__it)->_M_is_cancelled())
        {
          ++__it;
          ++__prev;
        }
      auto __cancelled = __prev;
      while (__it != __end)
        {
          if ((*__it)->_M_fd == __fd)
            {
              (*__it)->cancel();
              ++__it;
              _M_ops.splice_after(__cancelled, _M_ops, __prev);
              ++__cancelled;
            }
          else
            {
              ++__it;
              ++__prev;
            }
        }
      _M_reactor._M_not_interested(__fd);
    }

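    // Type-erased base class for a pending asynchronous I/O operation.
    // Cancellation is recorded by overwriting the file descriptor with -1.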
    struct __async_operation
    {
      __async_operation(int __fd, int __ev) : _M_fd(__fd), _M_ev(__ev) { }

      virtual ~__async_operation() = default;

      int _M_fd;
      short _M_ev;

      void cancel() { _M_fd = -1; }
      bool _M_is_cancelled() const { return _M_fd == -1; }
      virtual void run(io_context&) = 0;
    };

    template<typename _Op>
      struct __async_operation_impl : __async_operation
      {
        __async_operation_impl(_Op&& __op, int __fd, int __ev)
        : __async_operation{__fd, __ev}, _M_op(std::move(__op)) { }

        _Op _M_op;

        void run(io_context& __ctx) override
        {
          if (_M_is_cancelled())
            _M_op(std::make_error_code(errc::operation_canceled));
          else
            _M_op(error_code{});
        }
      };

#ifdef _GLIBCXX_HAS_GTHREADS
    atomic<count_type> _M_work_count;
#else
    count_type _M_work_count;
#endif
    mutable execution_context::mutex_type _M_mtx;
    queue<function<void()>> _M_op;
    bool _M_stopped = false;

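    // RAII helper for the run functions: notes that the current thread is
    // running the io_context, and on exit stops the context if there is no
    // outstanding work left.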
    struct __monitor
    {
      __monitor(io_context& __c) : _M_ctx(__c)
      {
#ifdef _GLIBCXX_HAS_GTHREADS
        lock_guard<execution_context::mutex_type> __lock(_M_ctx._M_mtx);
        _M_ctx._M_call_stack.push_back(this_thread::get_id());
#else
        _M_ctx._M_run_count++;
#endif
      }

      ~__monitor()
      {
#ifdef _GLIBCXX_HAS_GTHREADS
        lock_guard<execution_context::mutex_type> __lock(_M_ctx._M_mtx);
        _M_ctx._M_call_stack.pop_back();
#else
        _M_ctx._M_run_count--;
#endif
        if (_M_ctx._M_outstanding_work() == 0)
          {
            _M_ctx._M_stopped = true;
            _M_ctx._M_reactor._M_notify();
          }
      }

      __monitor(__monitor&&) = delete;

      io_context& _M_ctx;
    };

    bool
    _M_do_one(chrono::milliseconds __timeout)
    {
      const bool __block = __timeout != chrono::milliseconds::zero();

      __reactor::__fdvec __fds;

      __monitor __mon{*this};

      __timer_queue_base* __timerq = nullptr;
      unique_ptr<__async_operation> __async_op;

      while (true)
        {
          if (__timerq)
            {
              if (__timerq->run_one())
                return true;
              else
                __timerq = nullptr;
            }

          if (__async_op)
            {
              __async_op->run(*this);
              // TODO need to unregister __async_op
              return true;
            }

          chrono::milliseconds __ms{0};

          {
            lock_guard<execution_context::mutex_type> __lock(_M_mtx);

            if (_M_stopped)
              return false;

            // find first timer with something to do
            for (auto __q : _M_timers)
              {
                auto __next = __q->_M_next();
                if (__next == __next.zero()) // ready to run immediately
                  {
                    __timerq = __q;
                    __ms = __next;
                    break;
                  }
                else if (__next != __next.max() && __block
                         && (__next < __ms || __timerq == nullptr))
                  {
                    __timerq = __q;
                    __ms = __next;
                  }
              }

            if (__timerq && __ms == __ms.zero())
              continue; // restart loop to run a timer immediately

            if (!_M_ops.empty() && _M_ops.front()->_M_is_cancelled())
              {
                _M_ops.front().swap(__async_op);
                _M_ops.pop_front();
                continue;
              }

            // TODO run any posted items

            if (__block)
              {
                if (__timerq == nullptr)
                  __ms = __timeout;
                else if (__ms.zero() <= __timeout && __timeout < __ms)
                  __ms = __timeout;
                else if (__ms.count() > numeric_limits<int>::max())
                  __ms = chrono::milliseconds{numeric_limits<int>::max()};
              }
            // else __ms == 0 and poll() will return immediately

          }

          auto __res = _M_reactor.wait(__fds, __ms);

          if (__res == __reactor::_S_retry)
            continue;

          if (__res == __reactor::_S_timeout)
            {
              if (__timerq == nullptr)
                return false;
              else
                continue; // timed out, so restart loop and process the timer
            }

          __timerq = nullptr;

          if (__fds.empty()) // nothing to do
            return false;

          lock_guard<execution_context::mutex_type> __lock(_M_mtx);
          for (auto __it = _M_ops.begin(), __end = _M_ops.end(),
               __prev = _M_ops.before_begin(); __it != __end; ++__it, ++__prev)
            {
              auto& __op = **__it;
              auto __pos = std::lower_bound(__fds.begin(), __fds.end(),
                  __op._M_fd,
                  [](const auto& __p, int __fd) { return __p.fd < __fd; });
              if (__pos != __fds.end() && __pos->fd == __op._M_fd
                  && __pos->revents & __op._M_ev)
                {
                  __it->swap(__async_op);
                  _M_ops.erase_after(__prev);
                  break; // restart loop and run op
                }
            }
        }
    }

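    // Reactor based on poll(2), using the self-pipe trick for wakeups:
    // _M_notify() writes a byte to the pipe so that a thread blocked in
    // poll() returns and re-evaluates its timers and pending operations.
    // The read end of the pipe is always the last element of _M_fds.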
    struct __reactor
    {
#ifdef _GLIBCXX_HAVE_POLL_H
      __reactor() : _M_fds(1)
      {
        int __pipe[2];
        if (::pipe(__pipe) == -1)
          __throw_system_error(errno);
        if (::fcntl(__pipe[0], F_SETFL, O_NONBLOCK) == -1
            || ::fcntl(__pipe[1], F_SETFL, O_NONBLOCK) == -1)
          {
            int __e = errno;
            ::close(__pipe[0]);
            ::close(__pipe[1]);
            __throw_system_error(__e);
          }
        _M_fds.back().events = POLLIN;
        _M_fds.back().fd = __pipe[0];
        _M_notify_wr = __pipe[1];
      }

      ~__reactor()
      {
        ::close(_M_fds.back().fd);
        ::close(_M_notify_wr);
      }
#endif

      // write a notification byte to the pipe (ignoring errors)
      void _M_notify()
      {
        int __n;
        do {
          __n = ::write(_M_notify_wr, "", 1);
        } while (__n == -1 && errno == EINTR);
      }

      // read all notification bytes from the pipe
      void _M_on_notify()
      {
        // Drain the pipe.
        char __buf[64];
        ssize_t __n;
        do {
          __n = ::read(_M_fds.back().fd, __buf, sizeof(__buf));
        } while (__n != -1 || errno == EINTR);
      }

      void
      _M_add_fd(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __throw_system_error((int)errc::invalid_argument);
        _M_fds.insert(__pos, __fdvec::value_type{})->fd = __fd;
        _M_notify();
      }

      void
      _M_remove_fd(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          _M_fds.erase(__pos);
        // else bug!
        _M_notify();
      }

      void
      _M_fd_interest(int __fd, int __w)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __pos->events |= __w;
        // else bug!
        _M_notify();
      }

      void
      _M_not_interested(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __pos->events = 0;
        _M_notify();
      }

#ifdef _GLIBCXX_HAVE_POLL_H
      using __fdvec = vector<::pollfd>;
#else
      struct dummy_pollfd { int fd = -1; short events = 0, revents = 0; };
      using __fdvec = vector<dummy_pollfd>;
#endif

      // Find first element p such that !(p.fd < __fd)
      // N.B. always returns a dereferencable iterator.
      __fdvec::iterator
      _M_lower_bound(int __fd)
      {
        return std::lower_bound(_M_fds.begin(), _M_fds.end() - 1,
            __fd, [](const auto& __p, int __fd) { return __p.fd < __fd; });
      }

      enum __status { _S_retry, _S_timeout, _S_ok, _S_error };

      __status
      wait(__fdvec& __fds, chrono::milliseconds __timeout)
      {
#ifdef _GLIBCXX_HAVE_POLL_H
        // XXX not thread-safe!
        __fds = _M_fds; // take snapshot to pass to poll()

        int __res = ::poll(__fds.data(), __fds.size(), __timeout.count());

        if (__res == -1)
          {
            __fds.clear();
            if (errno == EINTR)
              return _S_retry;
            return _S_error; // XXX ???
          }
        else if (__res == 0)
          {
            __fds.clear();
            return _S_timeout;
          }
        else if (__fds.back().revents != 0) // something changed, restart
          {
            __fds.clear();
            _M_on_notify();
            return _S_retry;
          }

        auto __part = std::stable_partition(__fds.begin(), __fds.end() - 1,
            [](const __fdvec::value_type& __p) { return __p.revents != 0; });
        __fds.erase(__part, __fds.end());

        return _S_ok;
#else
        (void) __timeout;
        __fds.clear();
        return _S_error;
#endif
      }

      __fdvec _M_fds;   // _M_fds.back() is the read end of the self-pipe
      int _M_notify_wr; // write end of the self-pipe
    };

    __reactor _M_reactor;

    vector<__timer_queue_base*> _M_timers;
    forward_list<unique_ptr<__async_operation>> _M_ops;

#ifdef _GLIBCXX_HAS_GTHREADS
    vector<thread::id> _M_call_stack;
#else
    int _M_run_count = 0;
#endif
  };

  inline bool
  operator==(const io_context::executor_type& __a,
             const io_context::executor_type& __b) noexcept
  {
    // https://github.com/chriskohlhoff/asio-tr2/issues/201
    using executor_type = io_context::executor_type;
    return std::addressof(executor_type(__a).context())
      == std::addressof(executor_type(__b).context());
  }

  inline bool
  operator!=(const io_context::executor_type& __a,
             const io_context::executor_type& __b) noexcept
  { return !(__a == __b); }

  template<> struct is_executor<io_context::executor_type> : true_type {};

  /// @}

} // namespace v1
} // namespace net
} // namespace experimental
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // C++14

#endif // _GLIBCXX_EXPERIMENTAL_IO_SERVICE