// <experimental/io_context> -*- C++ -*-

// Copyright (C) 2015-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file experimental/io_context
 *  This is a TS C++ Library header.
 *  @ingroup networking-ts
 */

#ifndef _GLIBCXX_EXPERIMENTAL_IO_SERVICE
#define _GLIBCXX_EXPERIMENTAL_IO_SERVICE 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <atomic>
#include <chrono>
#include <forward_list>
#include <functional>
#include <system_error>
#include <thread>
#include <experimental/netfwd>
#include <experimental/executor>
#if _GLIBCXX_HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef _GLIBCXX_HAVE_POLL_H
# include <poll.h>
#endif
#ifdef _GLIBCXX_HAVE_FCNTL_H
# include <fcntl.h>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace experimental
{
namespace net
{
inline namespace v1
{

  /** @addtogroup networking-ts
   *  @{
   */

  class __socket_impl;

  /// An ExecutionContext for I/O operations.
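  ///
  /// A minimal usage sketch (not part of this header): a timer such as
  /// net::system_timer, declared in <experimental/timer>, submits work to
  /// the context, and run() blocks until no outstanding work remains.
  /// @code
  /// std::experimental::net::io_context ctx;
  /// std::experimental::net::system_timer t(ctx, std::chrono::seconds(1));
  /// t.async_wait([](std::error_code ec) { /* runs after one second */ });
  /// ctx.run(); // returns once the handler has run
  /// @endcode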
  class io_context : public execution_context
  {
  public:
    // types:

    /// An executor for an io_context.
    class executor_type
    {
    public:
      // construct / copy / destroy:

      executor_type(const executor_type& __other) noexcept = default;
      executor_type(executor_type&& __other) noexcept = default;

      executor_type& operator=(const executor_type& __other) noexcept = default;
      executor_type& operator=(executor_type&& __other) noexcept = default;

      // executor operations:

      bool running_in_this_thread() const noexcept
      {
        lock_guard<mutex> __lock(_M_ctx->_M_mtx);
        auto __end = _M_ctx->_M_call_stack.end();
        return std::find(_M_ctx->_M_call_stack.begin(), __end,
                         this_thread::get_id()) != __end;
      }

      io_context& context() const noexcept { return *_M_ctx; }

      void on_work_started() const noexcept { ++_M_ctx->_M_work_count; }
      void on_work_finished() const noexcept { --_M_ctx->_M_work_count; }

      template<typename _Func, typename _ProtoAllocator>
        void
        dispatch(_Func&& __f, const _ProtoAllocator& __a) const
        {
          if (running_in_this_thread())
            decay_t<_Func>{std::forward<_Func>(__f)}();
          else
            post(std::forward<_Func>(__f), __a);
        }

      template<typename _Func, typename _ProtoAllocator>
        void
        post(_Func&& __f, const _ProtoAllocator& __a) const
        {
          lock_guard<mutex> __lock(_M_ctx->_M_mtx);
          // TODO (re-use functionality in system_context).
          // The handler is not queued yet; this only wakes the reactor.
          _M_ctx->_M_reactor._M_notify();
        }

      template<typename _Func, typename _ProtoAllocator>
        void
        defer(_Func&& __f, const _ProtoAllocator& __a) const
        { post(std::forward<_Func>(__f), __a); }

    private:
      friend io_context;

      explicit
      executor_type(io_context& __ctx) : _M_ctx(std::addressof(__ctx)) { }

      io_context* _M_ctx;
    };

    using count_type = size_t;

    // construct / copy / destroy:

    io_context() : _M_work_count(0) { }

    explicit
    io_context(int __concurrency_hint) : _M_work_count(0) { }

    io_context(const io_context&) = delete;
    io_context& operator=(const io_context&) = delete;

    // io_context operations:

    executor_type get_executor() noexcept { return executor_type(*this); }

    count_type
    run()
    {
      count_type __n = 0;
      while (run_one())
        if (__n != numeric_limits<count_type>::max())
          ++__n;
      return __n;
    }

    template<typename _Rep, typename _Period>
      count_type
      run_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        count_type __n = 0;
        while (run_one_until(__abs_time))
          if (__n != numeric_limits<count_type>::max())
            ++__n;
        return __n;
      }

    count_type
    run_one()
    { return _M_do_one(chrono::milliseconds{-1}); }

    template<typename _Rep, typename _Period>
      count_type
      run_one_for(const chrono::duration<_Rep, _Period>& __rel_time)
      { return run_one_until(chrono::steady_clock::now() + __rel_time); }

    template<typename _Clock, typename _Duration>
      count_type
      run_one_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        auto __now = _Clock::now();
        while (__now < __abs_time)
          {
            using namespace std::chrono;
            auto __ms = duration_cast<milliseconds>(__abs_time - __now);
            if (_M_do_one(__ms))
              return 1;
            __now = _Clock::now();
          }
        return 0;
      }

    count_type
    poll()
    {
      count_type __n = 0;
      while (poll_one())
        if (__n != numeric_limits<count_type>::max())
          ++__n;
      return __n;
    }

    count_type
    poll_one()
    { return _M_do_one(chrono::milliseconds{0}); }

    void stop()
    {
      lock_guard<mutex> __lock(_M_mtx);
      _M_stopped = true;
      _M_reactor._M_notify();
    }

    bool stopped() const noexcept
    {
      lock_guard<mutex> __lock(_M_mtx);
      return _M_stopped;
    }

    void restart()
    {
      _M_stopped = false;
    }

  private:

    template<typename _Clock, typename _WaitTraits>
      friend class basic_waitable_timer;

    friend __socket_impl;

    template<typename _Protocol>
      friend class __basic_socket_impl;

    template<typename _Protocol>
      friend class basic_socket;

    template<typename _Protocol>
      friend class basic_datagram_socket;

    template<typename _Protocol>
      friend class basic_stream_socket;

    template<typename _Protocol>
      friend class basic_socket_acceptor;

    count_type
    _M_outstanding_work() const
    { return _M_work_count + !_M_ops.empty(); }

    struct __timer_queue_base : execution_context::service
    {
      // return milliseconds until next timer expires, or milliseconds::max()
      virtual chrono::milliseconds _M_next() const = 0;
      virtual bool run_one() = 0;

    protected:
      explicit
      __timer_queue_base(execution_context& __ctx) : service(__ctx)
      {
        auto& __ioc = static_cast<io_context&>(__ctx);
        lock_guard<mutex> __lock(__ioc._M_mtx);
        __ioc._M_timers.push_back(this);
      }

      mutable mutex _M_qmtx;
    };

    template<typename _Timer, typename _Key = typename _Timer::_Key>
      struct __timer_queue : __timer_queue_base
      {
        using key_type = __timer_queue;

        explicit
        __timer_queue(execution_context& __ctx) : __timer_queue_base(__ctx)
        { }

        void shutdown() noexcept { }

        io_context& context() noexcept
        { return static_cast<io_context&>(service::context()); }

        // Start an asynchronous wait.
        void
        push(const _Timer& __t, function<void(error_code)> __h)
        {
          context().get_executor().on_work_started();
          lock_guard<mutex> __lock(_M_qmtx);
          _M_queue.emplace(__t, _M_next_id++, std::move(__h));
          // no need to notify reactor unless this timer went to the front?
        }

        // Cancel all outstanding waits for __t
        size_t
        cancel(const _Timer& __t)
        {
          lock_guard<mutex> __lock(_M_qmtx);
          size_t __count = 0;
          auto __last = _M_queue.end();
          for (auto __it = _M_queue.begin(), __end = __last; __it != __end;
               ++__it)
            {
              if (__it->_M_key == __t._M_key.get())
                {
                  __it->cancel();
                  __last = __it;
                  ++__count;
                }
            }
          if (__count)
            _M_queue._M_sort_to(__last);
          return __count;
        }

        // Cancel oldest outstanding wait for __t
        bool
        cancel_one(const _Timer& __t)
        {
          lock_guard<mutex> __lock(_M_qmtx);
          const auto __end = _M_queue.end();
          auto __oldest = __end;
          for (auto __it = _M_queue.begin(); __it != __end; ++__it)
            if (__it->_M_key == __t._M_key.get())
              if (__oldest == __end || __it->_M_id < __oldest->_M_id)
                __oldest = __it;
          if (__oldest == __end)
            return false;
          __oldest->cancel();
          _M_queue._M_sort_to(__oldest);
          return true;
        }

        chrono::milliseconds
        _M_next() const override
        {
          typename _Timer::time_point __exp;
          {
            lock_guard<mutex> __lock(_M_qmtx);
            if (_M_queue.empty())
              return chrono::milliseconds::max(); // no pending timers
            if (_M_queue.top()._M_key == nullptr)
              return chrono::milliseconds::zero(); // cancelled, run now
            __exp = _M_queue.top()._M_expiry;
          }
          auto __dur = _Timer::traits_type::to_wait_duration(__exp);
          if (__dur < __dur.zero())
            __dur = __dur.zero();
          return chrono::duration_cast<chrono::milliseconds>(__dur);
        }

      private:

        bool run_one() override
        {
          auto __now = _Timer::clock_type::now();
          function<void(error_code)> __h;
          error_code __ec;
          {
            lock_guard<mutex> __lock(_M_qmtx);

            if (_M_queue.top()._M_key == nullptr) // cancelled
              {
                __h = std::move(_M_queue.top()._M_h);
                __ec = std::make_error_code(errc::operation_canceled);
                _M_queue.pop();
              }
            else if (_M_queue.top()._M_expiry <= __now)
              {
                __h = std::move(_M_queue.top()._M_h);
                _M_queue.pop();
              }
          }
          if (__h)
            {
              __h(__ec);
              context().get_executor().on_work_finished();
              return true;
            }
          return false;
        }

        using __timer_id_type = uint64_t;

        struct __pending_timer
        {
          __pending_timer(const _Timer& __t, uint64_t __id,
                          function<void(error_code)> __h)
          : _M_expiry(__t.expiry()), _M_key(__t._M_key.get()), _M_id(__id),
            _M_h(std::move(__h))
          { }

          typename _Timer::time_point _M_expiry;
          _Key* _M_key;
          __timer_id_type _M_id;
          function<void(error_code)> _M_h;

          void cancel() { _M_expiry = _M_expiry.min(); _M_key = nullptr; }

          bool
          operator<(const __pending_timer& __rhs) const
          { return _M_expiry < __rhs._M_expiry; }
        };

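        // A priority_queue that also exposes the underlying container, so
        // that cancel/cancel_one can mark entries in place and then restore
        // the ordering of the affected prefix with _M_sort_to.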
        struct __queue : priority_queue<__pending_timer>
        {
          using iterator =
            typename priority_queue<__pending_timer>::container_type::iterator;

          // expose begin/end/erase for direct access to underlying container
          iterator begin() { return this->c.begin(); }
          iterator end() { return this->c.end(); }
          iterator erase(iterator __it) { return this->c.erase(__it); }

          void
          _M_sort_to(iterator __it)
          { std::stable_sort(this->c.begin(), ++__it); }
        };

        __queue _M_queue;
        __timer_id_type _M_next_id = 0;
      };

    template<typename _Timer, typename _CompletionHandler>
      void
      async_wait(const _Timer& __timer, _CompletionHandler&& __h)
      {
        auto& __queue = use_service<__timer_queue<_Timer>>(*this);
        __queue.push(__timer, std::move(__h));
        _M_reactor._M_notify();
      }

    // Cancel all wait operations initiated by __timer.
    template<typename _Timer>
      size_t
      cancel(const _Timer& __timer)
      {
        if (!has_service<__timer_queue<_Timer>>(*this))
          return 0;

        auto __c = use_service<__timer_queue<_Timer>>(*this).cancel(__timer);
        if (__c != 0)
          _M_reactor._M_notify();
        return __c;
      }

    // Cancel the oldest wait operation initiated by __timer.
    template<typename _Timer>
      size_t
      cancel_one(const _Timer& __timer)
      {
        if (!has_service<__timer_queue<_Timer>>(*this))
          return 0;

        if (use_service<__timer_queue<_Timer>>(*this).cancel_one(__timer))
          {
            _M_reactor._M_notify();
            return 1;
          }
        return 0;
      }

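    // Register __op to be invoked once file descriptor __fd becomes ready
    // for any of the poll events in __w.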
    template<typename _Op>
      void
      async_wait(int __fd, int __w, _Op&& __op)
      {
        lock_guard<mutex> __lock(_M_mtx);
        // TODO need push_back, use std::list not std::forward_list
        auto __tail = _M_ops.before_begin(), __it = _M_ops.begin();
        while (__it != _M_ops.end())
          {
            ++__it;
            ++__tail;
          }
        using __type = __async_operation_impl<_Op>;
        _M_ops.emplace_after(__tail,
                             make_unique<__type>(std::move(__op), __fd, __w));
        _M_reactor._M_fd_interest(__fd, __w);
      }

    void _M_add_fd(int __fd) { _M_reactor._M_add_fd(__fd); }
    void _M_remove_fd(int __fd) { _M_reactor._M_remove_fd(__fd); }

    void cancel(int __fd, error_code&)
    {
      lock_guard<mutex> __lock(_M_mtx);
      const auto __end = _M_ops.end();
      auto __it = _M_ops.begin();
      auto __prev = _M_ops.before_begin();
      // Cancelled operations are kept at the front of the list; skip them.
      while (__it != __end && (*__it)->_M_is_cancelled())
        {
          ++__it;
          ++__prev;
        }
      auto __cancelled = __prev;
      // Cancel every operation for __fd and splice it to the front.
      while (__it != __end)
        {
          if ((*__it)->_M_fd == __fd)
            {
              (*__it)->cancel();
              ++__it;
              _M_ops.splice_after(__cancelled, _M_ops, __prev);
              ++__cancelled;
            }
          else
            {
              ++__it;
              ++__prev;
            }
        }
      _M_reactor._M_not_interested(__fd);
    }

    struct __async_operation
    {
      __async_operation(int __fd, int __ev) : _M_fd(__fd), _M_ev(__ev) { }

      virtual ~__async_operation() = default;

      int _M_fd;
      short _M_ev;

      void cancel() { _M_fd = -1; }
      bool _M_is_cancelled() const { return _M_fd == -1; }
      virtual void run(io_context&) = 0;
    };

    template<typename _Op>
      struct __async_operation_impl : __async_operation
      {
        __async_operation_impl(_Op&& __op, int __fd, int __ev)
        : __async_operation{__fd, __ev}, _M_op(std::move(__op)) { }

        _Op _M_op;

        void run(io_context& __ctx)
        {
          if (_M_is_cancelled())
            _M_op(std::make_error_code(errc::operation_canceled));
          else
            _M_op(error_code{});
        }
      };

    atomic<count_type> _M_work_count;
    mutable mutex _M_mtx;
    queue<function<void()>> _M_op;
    bool _M_stopped = false;

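    // RAII helper used by _M_do_one: registers the current thread on the
    // context's call stack (so running_in_this_thread() works) and, on
    // exit, stops the context if no outstanding work remains.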
    struct __monitor
    {
      __monitor(io_context& __c) : _M_ctx(__c)
      {
        lock_guard<mutex> __lock(_M_ctx._M_mtx);
        _M_ctx._M_call_stack.push_back(this_thread::get_id());
      }

      ~__monitor()
      {
        lock_guard<mutex> __lock(_M_ctx._M_mtx);
        _M_ctx._M_call_stack.pop_back();
        if (_M_ctx._M_outstanding_work() == 0)
          {
            _M_ctx._M_stopped = true;
            _M_ctx._M_reactor._M_notify();
          }
      }

      __monitor(__monitor&&) = delete;

      io_context& _M_ctx;
    };

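    // Run at most one handler: either an expired (or cancelled) timer's
    // completion handler or a ready (or cancelled) asynchronous operation.
    // A negative __timeout blocks indefinitely; a zero __timeout polls
    // without blocking.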
    bool
    _M_do_one(chrono::milliseconds __timeout)
    {
      const bool __block = __timeout != chrono::milliseconds::zero();

      __reactor::__fdvec __fds;

      __monitor __mon{*this};

      __timer_queue_base* __timerq = nullptr;
      unique_ptr<__async_operation> __async_op;

      while (true)
        {
          if (__timerq)
            {
              if (__timerq->run_one())
                return true;
              else
                __timerq = nullptr;
            }

          if (__async_op)
            {
              __async_op->run(*this);
              // TODO need to unregister __async_op
              return true;
            }

          chrono::milliseconds __ms{0};

          {
            lock_guard<mutex> __lock(_M_mtx);

            if (_M_stopped)
              return false;

            // find first timer with something to do
            for (auto __q : _M_timers)
              {
                auto __next = __q->_M_next();
                if (__next == __next.zero()) // ready to run immediately
                  {
                    __timerq = __q;
                    __ms = __next;
                    break;
                  }
                else if (__next != __next.max() && __block
                         && (__next < __ms || __timerq == nullptr))
                  {
                    __timerq = __q;
                    __ms = __next;
                  }
              }

            if (__timerq && __ms == __ms.zero())
              continue; // restart loop to run a timer immediately

            if (!_M_ops.empty() && _M_ops.front()->_M_is_cancelled())
              {
                _M_ops.front().swap(__async_op);
                _M_ops.pop_front();
                continue;
              }

            // TODO run any posted items

            if (__block)
              {
                if (__timerq == nullptr)
                  __ms = __timeout;
                else if (__ms.zero() <= __timeout && __timeout < __ms)
                  __ms = __timeout;
                else if (__ms.count() > numeric_limits<int>::max())
                  __ms = chrono::milliseconds{numeric_limits<int>::max()};
              }
            // else __ms == 0 and poll() will return immediately
          }

          auto __res = _M_reactor.wait(__fds, __ms);

          if (__res == __reactor::_S_retry)
            continue;

          if (__res == __reactor::_S_timeout)
            {
              if (__timerq == nullptr)
                return false;
              else
                continue; // timed out, so restart loop and process the timer
            }

          __timerq = nullptr;

          if (__fds.empty()) // nothing to do
            return false;

          lock_guard<mutex> __lock(_M_mtx);
          for (auto __it = _M_ops.begin(), __end = _M_ops.end(),
               __prev = _M_ops.before_begin(); __it != __end; ++__it, ++__prev)
            {
              auto& __op = **__it;
              auto __pos = std::lower_bound(__fds.begin(), __fds.end(),
                  __op._M_fd,
                  [](const auto& __p, int __fd) { return __p.fd < __fd; });
              if (__pos != __fds.end() && __pos->fd == __op._M_fd
                  && __pos->revents & __op._M_ev)
                {
                  __it->swap(__async_op);
                  _M_ops.erase_after(__prev);
                  break; // restart loop and run op
                }
            }
        }
    }

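    // A simple poll(2)-based reactor using the self-pipe trick: the read
    // end of a pipe is always in the polled set, so writing a byte to the
    // write end (_M_notify) wakes any thread blocked in wait().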
    struct __reactor
    {
      __reactor() : _M_fds(1)
      {
        int __pipe[2];
        if (::pipe(__pipe) == -1)
          __throw_system_error(errno);
        if (::fcntl(__pipe[0], F_SETFL, O_NONBLOCK) == -1
            || ::fcntl(__pipe[1], F_SETFL, O_NONBLOCK) == -1)
          {
            int __e = errno;
            ::close(__pipe[0]);
            ::close(__pipe[1]);
            __throw_system_error(__e);
          }
        _M_fds.back().events = POLLIN;
        _M_fds.back().fd = __pipe[0];
        _M_notify_wr = __pipe[1];
      }

      ~__reactor()
      {
        ::close(_M_fds.back().fd);
        ::close(_M_notify_wr);
      }

      // write a notification byte to the pipe (ignoring errors)
      void _M_notify()
      {
        int __n;
        do {
          __n = ::write(_M_notify_wr, "", 1);
        } while (__n == -1 && errno == EINTR);
      }

      // read all notification bytes from the pipe
      void _M_on_notify()
      {
        // Drain the pipe.
        char __buf[64];
        ssize_t __n;
        do {
          __n = ::read(_M_fds.back().fd, __buf, sizeof(__buf));
        } while (__n != -1 || errno == EINTR);
      }

      void
      _M_add_fd(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __throw_system_error((int)errc::invalid_argument);
        _M_fds.insert(__pos, __fdvec::value_type{})->fd = __fd;
        _M_notify();
      }

      void
      _M_remove_fd(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          _M_fds.erase(__pos);
        // else bug!
        _M_notify();
      }

      void
      _M_fd_interest(int __fd, int __w)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __pos->events |= __w;
        // else bug!
        _M_notify();
      }

      void
      _M_not_interested(int __fd)
      {
        auto __pos = _M_lower_bound(__fd);
        if (__pos->fd == __fd)
          __pos->events = 0;
        _M_notify();
      }

#ifdef _GLIBCXX_HAVE_POLL_H
      using __fdvec = vector<::pollfd>;

      // Find first element p such that !(p.fd < __fd)
      // N.B. always returns a dereferenceable iterator.
      __fdvec::iterator
      _M_lower_bound(int __fd)
      {
        return std::lower_bound(_M_fds.begin(), _M_fds.end() - 1,
            __fd, [](const auto& __p, int __fd) { return __p.fd < __fd; });
      }

      enum __status { _S_retry, _S_timeout, _S_ok, _S_error };

      __status
      wait(__fdvec& __fds, chrono::milliseconds __timeout)
      {
        // XXX not thread-safe!
        __fds = _M_fds; // take snapshot to pass to poll()

        int __res = ::poll(__fds.data(), __fds.size(), __timeout.count());

        if (__res == -1)
          {
            __fds.clear();
            if (errno == EINTR)
              return _S_retry;
            return _S_error; // XXX ???
          }
        else if (__res == 0)
          {
            __fds.clear();
            return _S_timeout;
          }
        else if (__fds.back().revents != 0) // something changed, restart
          {
            __fds.clear();
            _M_on_notify();
            return _S_retry;
          }

        // Keep only the readied entries (this also drops the self-pipe slot,
        // which has no pending events here).
        auto __part = std::stable_partition(__fds.begin(), __fds.end() - 1,
            [](const __fdvec::value_type& __p) { return __p.revents != 0; });
        __fds.erase(__part, __fds.end());

        return _S_ok;
      }

      __fdvec _M_fds; // _M_fds.back() is the read end of the self-pipe
#endif
      int _M_notify_wr; // write end of the self-pipe
    };

    __reactor _M_reactor;

    vector<__timer_queue_base*> _M_timers;
    forward_list<unique_ptr<__async_operation>> _M_ops;

    vector<thread::id> _M_call_stack;
  };

  inline bool
  operator==(const io_context::executor_type& __a,
             const io_context::executor_type& __b) noexcept
  {
    // https://github.com/chriskohlhoff/asio-tr2/issues/201
    using executor_type = io_context::executor_type;
    return std::addressof(executor_type(__a).context())
      == std::addressof(executor_type(__b).context());
  }

  inline bool
  operator!=(const io_context::executor_type& __a,
             const io_context::executor_type& __b) noexcept
  { return !(__a == __b); }

  template<> struct is_executor<io_context::executor_type> : true_type {};

  /// @}

} // namespace v1
} // namespace net
} // namespace experimental
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // C++14

#endif // _GLIBCXX_EXPERIMENTAL_IO_SERVICE