// <experimental/executor> -*- C++ -*-

// Copyright (C) 2015-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file experimental/executor
 *  This is a TS C++ Library header.
 *  @ingroup networking-ts
 */

#ifndef _GLIBCXX_EXPERIMENTAL_EXECUTOR
#define _GLIBCXX_EXPERIMENTAL_EXECUTOR 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <algorithm>
#include <condition_variable>
#include <functional>
#include <future>
#include <list>
#include <queue>
#include <thread>
#include <tuple>
#include <unordered_map>
#include <vector>
#include <experimental/netfwd>
#include <bits/unique_ptr.h>
#include <experimental/bits/net.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace experimental
{
namespace net
{
inline namespace v1
{
  /** @addtogroup networking-ts
   *  @{
   */

  /// Customization point for asynchronous operations.
  template<typename _CompletionToken, typename _Signature>
    class async_result;

  /// Convenience utility to help implement asynchronous operations.
  template<typename _CompletionToken, typename _Signature>
    class async_completion;

  template<typename _Tp, typename _ProtoAlloc, typename = __void_t<>>
    struct __associated_allocator_impl
    {
      using type = _ProtoAlloc;

      static type
      _S_get(const _Tp&, const _ProtoAlloc& __a) noexcept
      { return __a; }
    };

  template<typename _Tp, typename _ProtoAlloc>
    struct __associated_allocator_impl<_Tp, _ProtoAlloc,
                                       __void_t<typename _Tp::allocator_type>>
    {
      using type = typename _Tp::allocator_type;

      static type
      _S_get(const _Tp& __t, const _ProtoAlloc&) noexcept
      { return __t.get_allocator(); }
    };

  /// Helper to associate an allocator with a type.
  template<typename _Tp, typename _ProtoAllocator = allocator<void>>
    struct associated_allocator
    : __associated_allocator_impl<_Tp, _ProtoAllocator>
    {
      static auto
      get(const _Tp& __t,
          const _ProtoAllocator& __a = _ProtoAllocator()) noexcept
      {
        using _Impl = __associated_allocator_impl<_Tp, _ProtoAllocator>;
        return _Impl::_S_get(__t, __a);
      }
    };

  /// Alias template for associated_allocator.
  template<typename _Tp, typename _ProtoAllocator = allocator<void>>
    using associated_allocator_t
      = typename associated_allocator<_Tp, _ProtoAllocator>::type;

  // get_associated_allocator:

  template<typename _Tp>
    inline associated_allocator_t<_Tp>
    get_associated_allocator(const _Tp& __t) noexcept
    { return associated_allocator<_Tp>::get(__t); }

  template<typename _Tp, typename _ProtoAllocator>
    inline associated_allocator_t<_Tp, _ProtoAllocator>
    get_associated_allocator(const _Tp& __t,
                             const _ProtoAllocator& __a) noexcept
    { return associated_allocator<_Tp, _ProtoAllocator>::get(__t, __a); }

  enum class fork_event { prepare, parent, child };

  /// An extensible, type-safe, polymorphic set of services.
  class execution_context;

  class service_already_exists : public logic_error
  {
  public:
    // _GLIBCXX_RESOLVE_LIB_DEFECTS
    // 3414. service_already_exists has no usable constructors
    service_already_exists() : logic_error("service already exists") { }
  };

  template<typename _Tp> struct is_executor;

  struct executor_arg_t { };

  constexpr executor_arg_t executor_arg = executor_arg_t();
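  // Illustrative sketch (not part of the TS interface; the handler type and
  // variable names below are hypothetical): a type that exposes a nested
  // allocator_type and a get_allocator() member has that allocator returned
  // by get_associated_allocator; any other type gets the default
  // _ProtoAllocator (std::allocator<void>).
  //
  //   struct __example_handler
  //   {
  //     using allocator_type = std::allocator<char>;
  //     allocator_type get_allocator() const noexcept { return {}; }
  //     void operator()() { }
  //   };
  //
  //   __example_handler __h;
  //   auto __a1 = net::get_associated_allocator(__h); // __h.get_allocator()
  //   auto __a2 = net::get_associated_allocator(42);  // std::allocator<void>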
  /// Trait for determining whether to construct an object with an executor.
  template<typename _Tp, typename _Executor> struct uses_executor;

  template<typename _Tp, typename _Executor, typename = __void_t<>>
    struct __associated_executor_impl
    {
      using type = _Executor;

      static type
      _S_get(const _Tp&, const _Executor& __e) noexcept
      { return __e; }
    };

  template<typename _Tp, typename _Executor>
    struct __associated_executor_impl<_Tp, _Executor,
                                      __void_t<typename _Tp::executor_type>>
    {
      using type = typename _Tp::executor_type;

      static type
      _S_get(const _Tp& __t, const _Executor&) noexcept
      { return __t.get_executor(); }
    };

  /// Helper to associate an executor with a type.
  template<typename _Tp, typename _Executor = system_executor>
    struct associated_executor
    : __associated_executor_impl<_Tp, _Executor>
    {
      static auto
      get(const _Tp& __t, const _Executor& __e = _Executor()) noexcept
      { return __associated_executor_impl<_Tp, _Executor>::_S_get(__t, __e); }
    };

  template<typename _Tp, typename _Executor = system_executor>
    using associated_executor_t
      = typename associated_executor<_Tp, _Executor>::type;

  template<typename _ExecutionContext>
    using __is_exec_context
      = is_convertible<_ExecutionContext&, execution_context&>;

  template<typename _Tp>
    using __executor_t = typename _Tp::executor_type;

  // get_associated_executor:

  template<typename _Tp>
    inline associated_executor_t<_Tp>
    get_associated_executor(const _Tp& __t) noexcept
    { return associated_executor<_Tp>::get(__t); }

  template<typename _Tp, typename _Executor>
    inline
    enable_if_t<is_executor<_Executor>::value,
                associated_executor_t<_Tp, _Executor>>
    get_associated_executor(const _Tp& __t, const _Executor& __ex)
    { return associated_executor<_Tp, _Executor>::get(__t, __ex); }

  template<typename _Tp, typename _ExecutionContext>
    inline
    enable_if_t<__is_exec_context<_ExecutionContext>::value,
                associated_executor_t<_Tp, __executor_t<_ExecutionContext>>>
    get_associated_executor(const _Tp& __t, _ExecutionContext& __ctx) noexcept
    { return net::get_associated_executor(__t, __ctx.get_executor()); }

  /// Helper to bind an executor to an object or function.
  template<typename _Tp, typename _Executor>
    class executor_binder;

  template<typename _Tp, typename _Executor, typename _Signature>
    class async_result<executor_binder<_Tp, _Executor>, _Signature>;

  template<typename _Tp, typename _Executor, typename _ProtoAllocator>
    struct associated_allocator<executor_binder<_Tp, _Executor>,
                                _ProtoAllocator>;

  template<typename _Tp, typename _Executor, typename _Executor1>
    struct associated_executor<executor_binder<_Tp, _Executor>, _Executor1>;

  // bind_executor:

  template<typename _Executor, typename _Tp>
    inline
    enable_if_t<is_executor<_Executor>::value,
                executor_binder<decay_t<_Tp>, _Executor>>
    bind_executor(const _Executor& __ex, _Tp&& __t)
    { return { std::forward<_Tp>(__t), __ex }; }

  template<typename _ExecutionContext, typename _Tp>
    inline
    enable_if_t<__is_exec_context<_ExecutionContext>::value,
                executor_binder<decay_t<_Tp>, __executor_t<_ExecutionContext>>>
    bind_executor(_ExecutionContext& __ctx, _Tp&& __t)
    { return { forward<_Tp>(__t), __ctx.get_executor() }; }

  /// A scope-guard type to record when work is started and finished.
  template<typename _Executor>
    class executor_work_guard;

  // make_work_guard:

  template<typename _Executor>
    inline
    enable_if_t<is_executor<_Executor>::value, executor_work_guard<_Executor>>
    make_work_guard(const _Executor& __ex)
    { return executor_work_guard<_Executor>(__ex); }

  template<typename _ExecutionContext>
    inline
    enable_if_t<__is_exec_context<_ExecutionContext>::value,
                executor_work_guard<__executor_t<_ExecutionContext>>>
    make_work_guard(_ExecutionContext& __ctx)
    { return net::make_work_guard(__ctx.get_executor()); }

  template<typename _Tp>
    inline
    enable_if_t<__not_<__or_<is_executor<_Tp>,
                             __is_exec_context<_Tp>>>::value,
                executor_work_guard<associated_executor_t<_Tp>>>
    make_work_guard(const _Tp& __t)
    { return net::make_work_guard(net::get_associated_executor(__t)); }

  template<typename _Tp, typename _Up>
    auto
    make_work_guard(const _Tp& __t, _Up&& __u)
    -> decltype(net::make_work_guard(
          net::get_associated_executor(__t, forward<_Up>(__u))))
    {
      return net::make_work_guard(
          net::get_associated_executor(__t, forward<_Up>(__u)));
    }

  /// Allows function objects to execute on any thread.
  class system_executor;

  /// The execution context associated with system_executor objects.
  class system_context;

  inline bool
  operator==(const system_executor&, const system_executor&) { return true; }

  inline bool
  operator!=(const system_executor&, const system_executor&) { return false; }
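  // Illustrative sketch (hypothetical variable names): bind_executor
  // associates an executor with a callable, and make_work_guard tells an
  // executor that work is outstanding until the guard is reset or destroyed.
  //
  //   net::system_executor __ex;
  //   auto __guard = net::make_work_guard(__ex);     // on_work_started()
  //   auto __bound = net::bind_executor(__ex, []{}); // executor_binder
  //   __bound();                                     // invokes the callable
  //   __guard.reset();                               // on_work_finished()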
  /// Exception thrown by empty executors.
  class bad_executor;

  /// Polymorphic wrapper for types satisfying the Executor requirements.
  class executor;

  bool operator==(const executor&, const executor&) noexcept;
  bool operator==(const executor&, nullptr_t) noexcept;
  bool operator==(nullptr_t, const executor&) noexcept;
  bool operator!=(const executor&, const executor&) noexcept;
  bool operator!=(const executor&, nullptr_t) noexcept;
  bool operator!=(nullptr_t, const executor&) noexcept;

  void swap(executor&, executor&) noexcept;

  // dispatch:

  template<typename _CompletionToken>
    __deduced_t<_CompletionToken, void()>
    dispatch(_CompletionToken&& __token);

  template<typename _Executor, typename _CompletionToken>
    __deduced_t<_CompletionToken, void()>
    dispatch(const _Executor& __ex, _CompletionToken&& __token);

  template<typename _ExecutionContext, typename _CompletionToken>
    __deduced_t<_CompletionToken, void()>
    dispatch(_ExecutionContext& __ctx, _CompletionToken&& __token);

  // post:

  template<typename _CompletionToken>
    __deduced_t<_CompletionToken, void()>
    post(_CompletionToken&& __token);

  template<typename _Executor, typename _CompletionToken>
    enable_if_t<is_executor<_Executor>::value,
                __deduced_t<_CompletionToken, void()>>
    post(const _Executor& __ex, _CompletionToken&& __token);

  template<typename _ExecutionContext, typename _CompletionToken>
    enable_if_t<__is_exec_context<_ExecutionContext>::value,
                __deduced_t<_CompletionToken, void()>>
    post(_ExecutionContext& __ctx, _CompletionToken&& __token);

  // defer:

  template<typename _CompletionToken>
    __deduced_t<_CompletionToken, void()>
    defer(_CompletionToken&& __token);

  template<typename _Executor, typename _CompletionToken>
    __deduced_t<_CompletionToken, void()>
    defer(const _Executor& __ex, _CompletionToken&& __token);

  template<typename _ExecutionContext, typename _CompletionToken>
    __deduced_t<_CompletionToken, void()>
    defer(_ExecutionContext& __ctx, _CompletionToken&& __token);

  template<typename _Executor> class strand;

  template<typename _Executor>
    bool
    operator==(const strand<_Executor>& __a, const strand<_Executor>& __b);

  template<typename _Executor>
    bool
    operator!=(const strand<_Executor>& __a, const strand<_Executor>& __b)
    { return !(__a == __b); }

  template<typename _CompletionToken, typename _Signature>
    class async_result
    {
    public:
      typedef _CompletionToken completion_handler_type;
      typedef void return_type;

      explicit async_result(completion_handler_type&) { }

      async_result(const async_result&) = delete;
      async_result& operator=(const async_result&) = delete;

      return_type get() { }
    };

  template<typename _CompletionToken, typename _Signature>
    class async_completion
    {
      using __result_type
        = async_result<decay_t<_CompletionToken>, _Signature>;

    public:
      using completion_handler_type
        = typename __result_type::completion_handler_type;

    private:
      using __handler_type = conditional_t<
        is_same<_CompletionToken, completion_handler_type>::value,
        completion_handler_type&,
        completion_handler_type>;

    public:
      explicit
      async_completion(_CompletionToken& __t)
      : completion_handler(std::forward<__handler_type>(__t)),
        result(completion_handler)
      { }

      async_completion(const async_completion&) = delete;
      async_completion& operator=(const async_completion&) = delete;

      __handler_type completion_handler;
      __result_type  result;
    };

  class execution_context
  {
  public:
    class service
    {
    protected:
      // construct / copy / destroy:

      explicit
      service(execution_context& __owner) : _M_context(__owner) { }

      service(const service&) = delete;
      service& operator=(const service&) = delete;

      virtual ~service() { } // TODO should not be inline

      // service observers:

      execution_context& context() const noexcept { return _M_context; }

    private:
      // service operations:

      virtual void shutdown() noexcept = 0;
      virtual void notify_fork(fork_event) { }

      friend class execution_context;
      execution_context& _M_context;
    };

    // construct / copy / destroy:

    execution_context() { }

    execution_context(const execution_context&) = delete;
    execution_context& operator=(const execution_context&) = delete;

    virtual ~execution_context()
    {
      shutdown();
      destroy();
    }

    // execution context operations:

    void
    notify_fork(fork_event __e)
    {
      auto __l = [=](auto& __svc) {
        __svc._M_ptr->notify_fork(__e);
      };
      if (__e == fork_event::prepare)
        std::for_each(_M_services.rbegin(), _M_services.rend(), __l);
      else
        std::for_each(_M_services.begin(), _M_services.end(), __l);
    }

  protected:
    // execution context protected operations:

    void
    shutdown()
    {
      std::for_each(_M_services.rbegin(), _M_services.rend(),
          [=](auto& __svc) {
            if (__svc._M_active)
              {
                __svc._M_ptr->shutdown();
                __svc._M_active = false;
              }
          });
    }

    void
    destroy()
    {
      while (_M_services.size())
        _M_services.pop_back();
      _M_keys.clear();
    }

  protected:
    template<typename _Service>
      static void
      _S_deleter(service* __svc)
      { delete static_cast<_Service*>(__svc); }

    struct _ServicePtr
    {
      template<typename _Service>
        explicit
        _ServicePtr(_Service* __svc)
        : _M_ptr(__svc, &_S_deleter<_Service>), _M_active(true)
        { }

      std::unique_ptr<service, void(*)(service*)> _M_ptr;
      bool _M_active;
    };

    mutable std::mutex _M_mutex;

    // Sorted in order of beginning of service object lifetime.
    std::list<_ServicePtr> _M_services;

    template<typename _Service, typename... _Args>
      service*
      _M_add_svc(_Args&&... __args)
      {
        _M_services.push_back(
            _ServicePtr{new _Service{*this, std::forward<_Args>(__args)...}});
        return _M_services.back()._M_ptr.get();
      }

    using __key_type = void(*)();

    template<typename _Key>
      static __key_type
      _S_key()
      { return reinterpret_cast<__key_type>(&_S_key<_Key>); }

    std::unordered_map<__key_type, service*> _M_keys;

    template<typename _Service>
      friend typename _Service::key_type&
      use_service(execution_context&);

    template<typename _Service, typename... _Args>
      friend _Service&
      make_service(execution_context&, _Args&&...);

    template<typename _Service>
      friend bool
      has_service(const execution_context&) noexcept;
  };

  // service access:

  template<typename _Service>
    typename _Service::key_type&
    use_service(execution_context& __ctx)
    {
      using _Key = typename _Service::key_type;
      static_assert(is_base_of<execution_context::service, _Service>::value,
          "a service type must derive from execution_context::service");
      static_assert(is_base_of<_Key, _Service>::value,
          "a service type must match or derive from its key_type");
      auto __key = execution_context::_S_key<_Key>();
      std::lock_guard<std::mutex> __lock(__ctx._M_mutex);
      auto& __svc = __ctx._M_keys[__key];
      if (__svc == nullptr)
        {
          __try
            {
              __svc = __ctx._M_add_svc<_Service>();
            }
          __catch(...)
            {
              __ctx._M_keys.erase(__key);
              __throw_exception_again;
            }
        }
      return static_cast<_Key&>(*__svc);
    }

  template<typename _Service, typename... _Args>
    _Service&
    make_service(execution_context& __ctx, _Args&&... __args)
    {
      using _Key = typename _Service::key_type;
      static_assert(is_base_of<execution_context::service, _Service>::value,
          "a service type must derive from execution_context::service");
      static_assert(is_base_of<_Key, _Service>::value,
          "a service type must match or derive from its key_type");
      auto __key = execution_context::_S_key<_Key>();
      std::lock_guard<std::mutex> __lock(__ctx._M_mutex);
      auto& __svc = __ctx._M_keys[__key];
      if (__svc != nullptr)
        throw service_already_exists();
      __try
        {
          __svc = __ctx._M_add_svc<_Service>(std::forward<_Args>(__args)...);
        }
      __catch(...)
        {
          __ctx._M_keys.erase(__key);
          __throw_exception_again;
        }
      return static_cast<_Service&>(*__svc);
    }

  template<typename _Service>
    inline bool
    has_service(const execution_context& __ctx) noexcept
    {
      using _Key = typename _Service::key_type;
      static_assert(is_base_of<execution_context::service, _Service>::value,
          "a service type must derive from execution_context::service");
      static_assert(is_base_of<_Key, _Service>::value,
          "a service type must match or derive from its key_type");
      std::lock_guard<std::mutex> __lock(__ctx._M_mutex);
      return __ctx._M_keys.count(execution_context::_S_key<_Key>());
    }
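  // Illustrative sketch (hypothetical service type, not provided by this
  // header): a user-defined service derives from execution_context::service,
  // names its key_type, and overrides shutdown(); use_service adds it to a
  // context on first use.
  //
  //   struct __my_service : net::execution_context::service
  //   {
  //     using key_type = __my_service;
  //     explicit __my_service(net::execution_context& __c) : service(__c) { }
  //   private:
  //     void shutdown() noexcept override { }
  //   };
  //
  //   net::execution_context __ctx;
  //   auto& __svc = net::use_service<__my_service>(__ctx);    // creates it
  //   bool __present = net::has_service<__my_service>(__ctx); // true
  //   // A second make_service<__my_service>(__ctx) would now throw
  //   // service_already_exists.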
  template<typename _Tp, typename = void>
    struct __is_executor_impl : false_type
    { };

  // Check Executor requirements.
  template<typename _Tp, typename _Up = remove_const_t<_Tp>>
    auto
    __executor_reqs(_Up* __x = 0, const _Up* __cx = 0, void(*__f)() = 0,
                    const allocator<int>& __a = {})
    -> enable_if_t<__is_value_constructible<_Tp>::value, __void_t<
      decltype(*__cx == *__cx),
      decltype(*__cx != *__cx),
      decltype(__x->context()),
      decltype(__x->on_work_started()),
      decltype(__x->on_work_finished()),
      decltype(__x->dispatch(std::move(__f), __a)),
      decltype(__x->post(std::move(__f), __a)),
      decltype(__x->defer(std::move(__f), __a))
    >>;

  template<typename _Tp>
    struct __is_executor_impl<_Tp, decltype(__executor_reqs<_Tp>())>
    : true_type
    { };

  template<typename _Tp>
    struct is_executor : __is_executor_impl<_Tp>
    { };

  template<typename _Tp>
    constexpr bool is_executor_v = is_executor<_Tp>::value;

  template<typename _Tp, typename _Executor, typename = void>
    struct __uses_executor_impl : false_type
    { };

  template<typename _Tp, typename _Executor>
    struct __uses_executor_impl<_Tp, _Executor,
                                __void_t<typename _Tp::executor_type>>
    : is_convertible<_Executor, typename _Tp::executor_type>
    { };

  template<typename _Tp, typename _Executor>
    struct uses_executor : __uses_executor_impl<_Tp, _Executor>::type
    { };

  template<typename _Tp, typename _Executor>
    constexpr bool uses_executor_v = uses_executor<_Tp, _Executor>::value;

  template<typename _Tp, typename _Executor>
    class executor_binder
    {
      struct __use_exec { };

    public:
      // types:

      typedef _Tp target_type;
      typedef _Executor executor_type;

      // construct / copy / destroy:

      executor_binder(_Tp __t, const _Executor& __ex)
      : executor_binder(__use_exec{}, std::move(__t), __ex)
      { }

      executor_binder(const executor_binder&) = default;
      executor_binder(executor_binder&&) = default;

      template<typename _Up, typename _OtherExecutor>
        executor_binder(const executor_binder<_Up, _OtherExecutor>& __other)
        : executor_binder(__use_exec{}, __other.get(), __other.get_executor())
        { }

      template<typename _Up, typename _OtherExecutor>
        executor_binder(executor_binder<_Up, _OtherExecutor>&& __other)
        : executor_binder(__use_exec{}, std::move(__other.get()),
                          __other.get_executor())
        { }

      template<typename _Up, typename _OtherExecutor>
        executor_binder(executor_arg_t, const _Executor& __ex,
                        const executor_binder<_Up, _OtherExecutor>& __other)
        : executor_binder(__use_exec{}, __other.get(), __ex)
        { }

      template<typename _Up, typename _OtherExecutor>
        executor_binder(executor_arg_t, const _Executor& __ex,
                        executor_binder<_Up, _OtherExecutor>&& __other)
        : executor_binder(__use_exec{}, std::move(__other.get()), __ex)
        { }

      ~executor_binder();

      // executor binder access:

      _Tp& get() noexcept { return _M_target; }
      const _Tp& get() const noexcept { return _M_target; }
      executor_type get_executor() const noexcept { return _M_ex; }

      // executor binder invocation:

      template<class... _Args>
        result_of_t<_Tp&(_Args&&...)>
        operator()(_Args&&... __args)
        { return std::__invoke(get(), std::forward<_Args>(__args)...); }
      template<class... _Args>
        result_of_t<const _Tp&(_Args&&...)>
        operator()(_Args&&... __args) const
        { return std::__invoke(get(), std::forward<_Args>(__args)...); }

    private:
      template<typename _Up>
        using __use_exec_cond
          = __and_<uses_executor<_Tp, _Executor>,
                   is_constructible<_Tp, executor_arg_t, _Executor, _Up>>;

      template<typename _Up, typename _Exec,
               typename = enable_if_t<__use_exec_cond<_Up>::value>>
        executor_binder(__use_exec, _Up&& __u, _Exec&& __ex)
        : _M_ex(std::forward<_Exec>(__ex)),
          _M_target(executor_arg, _M_ex, std::forward<_Up>(__u))
        { }

      template<typename _Up, typename _Exec,
               typename = enable_if_t<!__use_exec_cond<_Up>::value>>
        executor_binder(__use_exec, _Up&& __u, const _Exec& __ex)
        : _M_ex(__ex),
          _M_target(std::forward<_Up>(__u))
        { }

      _Executor _M_ex;
      _Tp       _M_target;
    };

  template<typename _Tp, typename _Executor, typename _Signature>
    class async_result<executor_binder<_Tp, _Executor>, _Signature>
    {
      using __inner = async_result<_Tp, _Signature>;

    public:
      using completion_handler_type
        = executor_binder<typename __inner::completion_handler_type,
                          _Executor>;
      using return_type = typename __inner::return_type;

      explicit
      async_result(completion_handler_type& __h) : _M_target(__h.get()) { }

      async_result(const async_result&) = delete;
      async_result& operator=(const async_result&) = delete;

      return_type get() { return _M_target.get(); }

    private:
      __inner _M_target;
    };

  template<typename _Tp, typename _Executor, typename _ProtoAlloc>
    struct associated_allocator<executor_binder<_Tp, _Executor>, _ProtoAlloc>
    {
      typedef associated_allocator_t<_Tp, _ProtoAlloc> type;

      static type
      get(const executor_binder<_Tp, _Executor>& __b,
          const _ProtoAlloc& __a = _ProtoAlloc()) noexcept
      { return associated_allocator<_Tp, _ProtoAlloc>::get(__b.get(), __a); }
    };

  template<typename _Tp, typename _Executor, typename _Executor1>
    struct associated_executor<executor_binder<_Tp, _Executor>, _Executor1>
    {
      typedef _Executor type;

      static type
      get(const executor_binder<_Tp, _Executor>& __b,
          const _Executor1& = _Executor1()) noexcept
      { return __b.get_executor(); }
    };

  template<typename _Executor>
    class executor_work_guard
    {
    public:
      // types:

      typedef _Executor executor_type;

      // construct / copy / destroy:

      explicit
      executor_work_guard(const executor_type& __ex) noexcept
      : _M_ex(__ex), _M_owns(true)
      { _M_ex.on_work_started(); }

      executor_work_guard(const executor_work_guard& __other) noexcept
      : _M_ex(__other._M_ex), _M_owns(__other._M_owns)
      {
        if (_M_owns)
          _M_ex.on_work_started();
      }

      executor_work_guard(executor_work_guard&& __other) noexcept
      : _M_ex(__other._M_ex), _M_owns(__other._M_owns)
      { __other._M_owns = false; }

      executor_work_guard& operator=(const executor_work_guard&) = delete;

      ~executor_work_guard()
      {
        if (_M_owns)
          _M_ex.on_work_finished();
      }

      // executor work guard observers:

      executor_type get_executor() const noexcept { return _M_ex; }

      bool owns_work() const noexcept { return _M_owns; }

      // executor work guard modifiers:

      void
      reset() noexcept
      {
        if (_M_owns)
          _M_ex.on_work_finished();
        _M_owns = false;
      }

    private:
      _Executor _M_ex;
      bool      _M_owns;
    };

  class system_context : public execution_context
  {
  public:
    // types:

    typedef system_executor executor_type;

    // construct / copy / destroy:

    system_context() = delete;
    system_context(const system_context&) = delete;
    system_context& operator=(const system_context&) = delete;

    ~system_context()
    {
      stop();
      join();
    }

    // system_context operations:

    executor_type get_executor() noexcept;

    void
    stop()
    {
      lock_guard<mutex> __lock(_M_mtx);
      _M_stopped = true;
      _M_cv.notify_all();
    }

    bool
    stopped() const noexcept
    {
      lock_guard<mutex> __lock(_M_mtx);
      return _M_stopped;
    }

    void
    join()
    {
      if (_M_thread.joinable())
        _M_thread.join();
    }

  private:
    friend system_executor;

    struct __tag { };
    system_context(__tag) { }

    thread                  _M_thread;
    mutable mutex           _M_mtx;
    condition_variable      _M_cv;
    queue<function<void()>> _M_tasks;
    bool                    _M_stopped = false;

    void
    _M_run()
    {
      while (true)
        {
          function<void()> __f;
          {
            unique_lock<mutex> __lock(_M_mtx);
            _M_cv.wait(__lock,
                       [this]{ return _M_stopped || !_M_tasks.empty(); });
            if (_M_stopped)
              return;
            __f = std::move(_M_tasks.front());
            _M_tasks.pop();
          }
          __f();
        }
    }

    void
    _M_post(std::function<void()> __f)
    {
      lock_guard<mutex> __lock(_M_mtx);
      if (_M_stopped)
        return;
      if (!_M_thread.joinable())
        _M_thread = std::thread(&system_context::_M_run, this);
      _M_tasks.push(std::move(__f)); // XXX allocator not used
      _M_cv.notify_one();
    }

    static system_context&
    _S_get() noexcept
    {
      static system_context __sc(__tag{});
      return __sc;
    }
  };

  class system_executor
  {
  public:
    // executor operations:

    system_executor() { }

    system_context&
    context() const noexcept
    { return system_context::_S_get(); }

    void on_work_started() const noexcept { }
    void on_work_finished() const noexcept { }

    template<typename _Func, typename _ProtoAlloc>
      void
      dispatch(_Func&& __f, const _ProtoAlloc& __a) const
      { decay_t<_Func>{std::forward<_Func>(__f)}(); }

    template<typename _Func, typename _ProtoAlloc>
      void
      post(_Func&& __f, const _ProtoAlloc&) const // XXX allocator not used
      { system_context::_S_get()._M_post(std::forward<_Func>(__f)); }

    template<typename _Func, typename _ProtoAlloc>
      void
      defer(_Func&& __f, const _ProtoAlloc& __a) const
      { post(std::forward<_Func>(__f), __a); }
  };

  inline system_executor
  system_context::get_executor() noexcept
  { return {}; }

  class bad_executor : public std::exception
  {
    virtual const char*
    what() const noexcept
    { return "bad executor"; }
  };

  inline void
  __throw_bad_executor() // TODO make non-inline
  {
#if __cpp_exceptions
    throw bad_executor();
#else
    __builtin_abort();
#endif
  }

  class executor
  {
  public:
    // construct / copy / destroy:

    executor() noexcept = default;

    executor(nullptr_t) noexcept { }
    executor(const executor&) noexcept = default;
    executor(executor&&) noexcept = default;

    template<typename _Executor>
      executor(_Executor __e)
      : _M_target(make_shared<_Tgt1<_Executor>>(std::move(__e)))
      { }

    template<typename _Executor, typename _ProtoAlloc>
      executor(allocator_arg_t, const _ProtoAlloc& __a, _Executor __e)
      : _M_target(allocate_shared<_Tgt2<_Executor, _ProtoAlloc>>(__a,
            std::move(__e), __a))
      { }

    executor& operator=(const executor&) noexcept = default;
    executor& operator=(executor&&) noexcept = default;

    executor&
    operator=(nullptr_t) noexcept
    {
      _M_target = nullptr;
      return *this;
    }

    template<typename _Executor>
      executor&
      operator=(_Executor __e)
      {
        executor(std::move(__e)).swap(*this);
        return *this;
      }

    ~executor() = default;

    // executor modifiers:

    void
    swap(executor& __other) noexcept
    { _M_target.swap(__other._M_target); }

    template<typename _Executor, typename _Alloc>
      void
      assign(_Executor __e, const _Alloc& __a)
      { executor(allocator_arg, __a, std::move(__e)).swap(*this); }

    // executor operations:

    execution_context&
    context() const noexcept
    {
      __glibcxx_assert( _M_target );
      return _M_target->context();
    }

    void
    on_work_started() const noexcept
    {
      __glibcxx_assert( _M_target );
      return _M_target->on_work_started();
    }

    void
    on_work_finished() const noexcept
    {
      __glibcxx_assert( _M_target );
      return _M_target->on_work_finished();
    }

    template<typename _Func, typename _Alloc>
      void
      dispatch(_Func&& __f, const _Alloc& __a) const
      {
        if (!_M_target)
          __throw_bad_executor();
        // _M_target->dispatch({allocator_arg, __a, std::forward<_Func>(__f)});
        _M_target->dispatch(std::forward<_Func>(__f));
      }

    template<typename _Func, typename _Alloc>
      void
      post(_Func&& __f, const _Alloc& __a) const
      {
        if (!_M_target)
          __throw_bad_executor();
        // _M_target->post({allocator_arg, __a, std::forward<_Func>(__f)});
        _M_target->post(std::forward<_Func>(__f));
      }

    template<typename _Func, typename _Alloc>
      void
      defer(_Func&& __f, const _Alloc& __a) const
      {
        if (!_M_target)
          __throw_bad_executor();
        // _M_target->defer({allocator_arg, __a, std::forward<_Func>(__f)});
        _M_target->defer(std::forward<_Func>(__f));
      }

    // executor capacity:

    explicit operator bool() const noexcept
    { return static_cast<bool>(_M_target); }

    // executor target access:

#if __cpp_rtti
    const type_info&
    target_type() const noexcept
    {
      if (_M_target)
        return *static_cast<const type_info*>(_M_target->target_type());
      return typeid(void);
    }
#endif

    template<typename _Executor>
      _Executor*
      target() noexcept
      {
        void* __p = nullptr;
        if (_M_target)
          {
            if (_M_target->_M_func == &_Tgt1<remove_cv_t<_Executor>>::_S_func)
              __p = _M_target->_M_func(_M_target.get(), nullptr);
#if __cpp_rtti
            else
              __p = _M_target->target(&typeid(_Executor));
#endif
          }
        return static_cast<_Executor*>(__p);
      }

    template<typename _Executor>
      const _Executor*
      target() const noexcept
      {
        const void* __p = nullptr;
        if (_M_target)
          {
            if (_M_target->_M_func == &_Tgt1<remove_cv_t<_Executor>>::_S_func)
              return (_Executor*)_M_target->_M_func(_M_target.get(), nullptr);
#if __cpp_rtti
            else
              __p = _M_target->target(&typeid(_Executor));
#endif
          }
        return static_cast<const _Executor*>(__p);
      }

  private:
    struct _Tgt
    {
      virtual void on_work_started() const noexcept = 0;
      virtual void on_work_finished() const noexcept = 0;
      virtual execution_context& context() const noexcept = 0;
      virtual void dispatch(std::function<void()>) const = 0;
      virtual void post(std::function<void()>) const = 0;
      virtual void defer(std::function<void()>) const = 0;
      virtual const void* target_type() const noexcept = 0;
      virtual void* target(const void*) noexcept = 0;
      virtual bool _M_equals(_Tgt*) const noexcept = 0;

      using _Func = void* (_Tgt*, const _Tgt*);
      _Func* _M_func; // Provides access to target without RTTI
    };

    template<typename _Ex>
      struct _Tgt1 : _Tgt
      {
        explicit
        _Tgt1(_Ex&& __ex) : _M_ex(std::move(__ex))
        { this->_M_func = &_S_func; }

        void
        on_work_started() const noexcept override
        { _M_ex.on_work_started(); }

        void
        on_work_finished() const noexcept override
        { _M_ex.on_work_finished(); }

        execution_context&
        context() const noexcept override
        { return _M_ex.context(); }

        void
        dispatch(std::function<void()> __f) const override
        { _M_ex.dispatch(std::move(__f), allocator<void>()); }

        void
        post(std::function<void()> __f) const override
        { _M_ex.post(std::move(__f), allocator<void>()); }

        void
        defer(std::function<void()> __f) const override
        { _M_ex.defer(std::move(__f), allocator<void>()); }

        const void*
        target_type() const noexcept override
        {
#if __cpp_rtti
          return &typeid(_Ex);
#else
          return nullptr;
#endif
        }

        void*
        target(const void* __ti) noexcept override
        {
#if __cpp_rtti
          if (*static_cast<const type_info*>(__ti) == typeid(_Ex))
            return std::__addressof(_M_ex);
#endif
          return nullptr;
        }

        bool
        _M_equals(_Tgt* __tgt) const noexcept override
        {
#if __cpp_rtti
          if (const void* __p = __tgt->target(&typeid(_Ex)))
            return *static_cast<const _Ex*>(__p) == _M_ex;
#endif
          return false;
        }

        _Ex _M_ex [[__no_unique_address__]];

        static void*
        _S_func(_Tgt* __p, const _Tgt* __q) noexcept
        {
          auto& __ex = static_cast<_Tgt1*>(__p)->_M_ex;
          if (__q)
            {
              if (__ex == static_cast<const _Tgt1*>(__q)->_M_ex)
                return __p;
              else
                return nullptr;
            }
          else
            return std::__addressof(__ex);
        }
      };

    template<typename _Ex, typename _Alloc>
      struct _Tgt2 : _Tgt1<_Ex>
      {
        explicit
        _Tgt2(_Ex&& __ex, const _Alloc& __a)
        : _Tgt1<_Ex>(std::move(__ex)), _M_alloc(__a)
        { }

        void
        dispatch(std::function<void()> __f) const override
        { this->_M_ex.dispatch(std::move(__f), _M_alloc); }

        void
        post(std::function<void()> __f) const override
        { this->_M_ex.post(std::move(__f), _M_alloc); }

        void
        defer(std::function<void()> __f) const override
        { this->_M_ex.defer(std::move(__f), _M_alloc); }

        _Alloc _M_alloc [[__no_unique_address__]];
      };

    // Partial specialization for std::allocator.
    // Don't store the allocator.
    template<typename _Ex, typename _Tp>
      struct _Tgt2<_Ex, std::allocator<_Tp>> : _Tgt1<_Ex>
      { };

    friend bool
    operator==(const executor& __a, const executor& __b) noexcept
    {
      _Tgt* __ta = __a._M_target.get();
      _Tgt* __tb = __b._M_target.get();
      if (__ta == __tb)
        return true;
      if (!__ta || !__tb)
        return false;
      if (__ta->_M_func == __tb->_M_func)
        return __ta->_M_func(__ta, __tb);
      return __ta->_M_equals(__tb);
    }

    shared_ptr<_Tgt> _M_target;
  };

  template<> struct is_executor<executor> : true_type { };

  /// executor comparisons

  inline bool
  operator==(const executor& __e, nullptr_t) noexcept
  { return !__e; }

  inline bool
  operator==(nullptr_t, const executor& __e) noexcept
  { return !__e; }

  inline bool
  operator!=(const executor& __a, const executor& __b) noexcept
  { return !(__a == __b); }

  inline bool
  operator!=(const executor& __e, nullptr_t) noexcept
  { return (bool)__e; }

  inline bool
  operator!=(nullptr_t, const executor& __e) noexcept
  { return (bool)__e; }

  /// Swap two executor objects.
  inline void
  swap(executor& __a, executor& __b) noexcept
  { __a.swap(__b); }

  template<typename _CompletionHandler>
    struct __dispatcher
    {
      explicit
      __dispatcher(_CompletionHandler& __h)
      : _M_h(std::move(__h)), _M_w(net::make_work_guard(_M_h))
      { }

      void
      operator()()
      {
        auto __alloc = net::get_associated_allocator(_M_h);
        _M_w.get_executor().dispatch(std::move(_M_h), __alloc);
        _M_w.reset();
      }

      _CompletionHandler _M_h;
      decltype(net::make_work_guard(_M_h)) _M_w;
    };

  template<typename _CompletionHandler>
    inline __dispatcher<_CompletionHandler>
    __make_dispatcher(_CompletionHandler& __h)
    { return __dispatcher<_CompletionHandler>{__h}; }

  // dispatch:

  template<typename _CompletionToken>
    inline __deduced_t<_CompletionToken, void()>
    dispatch(_CompletionToken&& __token)
    {
      async_completion<_CompletionToken, void()> __cmpl{__token};
      auto __ex = net::get_associated_executor(__cmpl.completion_handler);
      auto __alloc = net::get_associated_allocator(__cmpl.completion_handler);
      __ex.dispatch(std::move(__cmpl.completion_handler), __alloc);
      return __cmpl.result.get();
    }

  template<typename _Executor, typename _CompletionToken>
    inline
    enable_if_t<is_executor<_Executor>::value,
                __deduced_t<_CompletionToken, void()>>
    dispatch(const _Executor& __ex, _CompletionToken&& __token)
    {
      async_completion<_CompletionToken, void()> __cmpl{__token};
      auto __alloc = net::get_associated_allocator(__cmpl.completion_handler);
      __ex.dispatch(net::__make_dispatcher(__cmpl.completion_handler),
                    __alloc);
      return __cmpl.result.get();
    }

  template<typename _ExecutionContext, typename _CompletionToken>
    inline
    enable_if_t<__is_exec_context<_ExecutionContext>::value,
                __deduced_t<_CompletionToken, void()>>
    dispatch(_ExecutionContext& __ctx, _CompletionToken&& __token)
    {
      return net::dispatch(__ctx.get_executor(),
                           forward<_CompletionToken>(__token));
    }

  // post:

  template<typename _CompletionToken>
    inline __deduced_t<_CompletionToken, void()>
    post(_CompletionToken&& __token)
    {
      async_completion<_CompletionToken, void()> __cmpl{__token};
      auto __ex = net::get_associated_executor(__cmpl.completion_handler);
      auto __alloc = net::get_associated_allocator(__cmpl.completion_handler);
      __ex.post(std::move(__cmpl.completion_handler), __alloc);
      return __cmpl.result.get();
    }

  template<typename _Executor, typename _CompletionToken>
    inline
    enable_if_t<is_executor<_Executor>::value,
                __deduced_t<_CompletionToken, void()>>
    post(const _Executor& __ex, _CompletionToken&& __token)
    {
      async_completion<_CompletionToken, void()> __cmpl{__token};
      auto __alloc = net::get_associated_allocator(__cmpl.completion_handler);
      __ex.post(net::__make_dispatcher(__cmpl.completion_handler), __alloc);
      return __cmpl.result.get();
    }

  template<typename _ExecutionContext, typename _CompletionToken>
    inline
    enable_if_t<__is_exec_context<_ExecutionContext>::value,
                __deduced_t<_CompletionToken, void()>>
    post(_ExecutionContext& __ctx, _CompletionToken&& __token)
    {
      return net::post(__ctx.get_executor(),
                       forward<_CompletionToken>(__token));
    }
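  // Illustrative sketch (hypothetical lambdas): dispatch() may run the
  // function object immediately in the calling thread, whereas post()
  // always submits it for later execution; with system_executor, posted
  // work runs on the system_context's internal thread.
  //
  //   net::system_executor __ex;
  //   net::dispatch(__ex, []{ /* may run here, synchronously */ });
  //   net::post(__ex, []{ /* queued; runs on the system thread */ });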
  // defer:

  template<typename _CompletionToken>
    inline __deduced_t<_CompletionToken, void()>
    defer(_CompletionToken&& __token)
    {
      async_completion<_CompletionToken, void()> __cmpl{__token};
      auto __ex = net::get_associated_executor(__cmpl.completion_handler);
      auto __alloc = net::get_associated_allocator(__cmpl.completion_handler);
      __ex.defer(std::move(__cmpl.completion_handler), __alloc);
      return __cmpl.result.get();
    }

  template<typename _Executor, typename _CompletionToken>
    inline
    enable_if_t<is_executor<_Executor>::value,
                __deduced_t<_CompletionToken, void()>>
    defer(const _Executor& __ex, _CompletionToken&& __token)
    {
      async_completion<_CompletionToken, void()> __cmpl{__token};
      auto __alloc = net::get_associated_allocator(__cmpl.completion_handler);
      __ex.defer(net::__make_dispatcher(__cmpl.completion_handler), __alloc);
      return __cmpl.result.get();
    }

  template<typename _ExecutionContext, typename _CompletionToken>
    inline
    enable_if_t<__is_exec_context<_ExecutionContext>::value,
                __deduced_t<_CompletionToken, void()>>
    defer(_ExecutionContext& __ctx, _CompletionToken&& __token)
    {
      return net::defer(__ctx.get_executor(),
                        forward<_CompletionToken>(__token));
    }

  template<typename _Executor>
    class strand
    {
    public:
      // types:

      typedef _Executor inner_executor_type;

      // construct / copy / destroy:

      strand(); // TODO make state

      explicit
      strand(_Executor __ex) : _M_inner_ex(__ex) { } // TODO make state

      template<typename _Alloc>
        strand(allocator_arg_t, const _Alloc& __a, _Executor __ex)
        : _M_inner_ex(__ex)
        { } // TODO make state

      strand(const strand& __other) noexcept
      : _M_state(__other._M_state), _M_inner_ex(__other._M_inner_ex) { }

      strand(strand&& __other) noexcept
      : _M_state(std::move(__other._M_state)),
        _M_inner_ex(std::move(__other._M_inner_ex)) { }

      template<typename _OtherExecutor>
        strand(const strand<_OtherExecutor>& __other) noexcept
        : _M_state(__other._M_state), _M_inner_ex(__other._M_inner_ex) { }

      template<typename _OtherExecutor>
        strand(strand<_OtherExecutor>&& __other) noexcept
        : _M_state(std::move(__other._M_state)),
          _M_inner_ex(std::move(__other._M_inner_ex)) { }

      strand&
      operator=(const strand& __other) noexcept
      {
        static_assert(is_copy_assignable<_Executor>::value,
            "inner executor type must be CopyAssignable");

        // TODO lock __other
        // TODO copy state
        _M_inner_ex = __other._M_inner_ex;
        return *this;
      }

      strand&
      operator=(strand&& __other) noexcept
      {
        static_assert(is_move_assignable<_Executor>::value,
            "inner executor type must be MoveAssignable");

        // TODO move state
        _M_inner_ex = std::move(__other._M_inner_ex);
        return *this;
      }

      template<typename _OtherExecutor>
        strand&
        operator=(const strand<_OtherExecutor>& __other) noexcept
        {
          static_assert(is_convertible<_OtherExecutor, _Executor>::value,
              "inner executor type must be compatible");

          // TODO lock __other
          // TODO copy state
          _M_inner_ex = __other._M_inner_ex;
          return *this;
        }

      template<typename _OtherExecutor>
        strand&
        operator=(strand<_OtherExecutor>&& __other) noexcept
        {
          static_assert(is_convertible<_OtherExecutor, _Executor>::value,
              "inner executor type must be compatible");

          // TODO move state
          _M_inner_ex = std::move(__other._M_inner_ex);
          return *this;
        }

      ~strand()
      {
        // the task queue outlives this object if non-empty
        // TODO create circular ref in queue?
      }
      // strand operations:

      inner_executor_type
      get_inner_executor() const noexcept
      { return _M_inner_ex; }

      bool
      running_in_this_thread() const noexcept
      { return std::this_thread::get_id() == _M_state->_M_running_on; }

      execution_context&
      context() const noexcept
      { return _M_inner_ex.context(); }

      void on_work_started() const noexcept
      { _M_inner_ex.on_work_started(); }

      void on_work_finished() const noexcept
      { _M_inner_ex.on_work_finished(); }

      template<typename _Func, typename _Alloc>
        void
        dispatch(_Func&& __f, const _Alloc& __a) const
        {
          if (running_in_this_thread())
            decay_t<_Func>{std::forward<_Func>(__f)}();
          else
            post(std::forward<_Func>(__f), __a);
        }

      template<typename _Func, typename _Alloc>
        void post(_Func&& __f, const _Alloc& __a) const; // TODO

      template<typename _Func, typename _Alloc>
        void
        defer(_Func&& __f, const _Alloc& __a) const
        { post(std::forward<_Func>(__f), __a); }

    private:
      friend bool
      operator==(const strand& __a, const strand& __b)
      { return __a._M_state == __b._M_state; }

      // TODO add synchronised queue
      struct _State
      {
        std::thread::id _M_running_on;
      };

      shared_ptr<_State> _M_state;
      _Executor          _M_inner_ex;
    };

#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)

  // Completion token for asynchronous operations initiated with use_future.
  template<typename _Func, typename _Alloc>
    struct __use_future_ct
    { std::tuple<_Func, _Alloc> _M_t; };

  template<typename _Func, typename _Tp>
    struct __use_future_ct<_Func, std::allocator<_Tp>>
    { _Func _M_f; };

  template<typename _ProtoAllocator = allocator<void>>
    class use_future_t
    {
    public:
      // use_future_t types:
      using allocator_type = _ProtoAllocator;

      // use_future_t members:
      constexpr
      use_future_t()
      noexcept(is_nothrow_default_constructible<_ProtoAllocator>::value)
      : _M_alloc()
      { }

      explicit
      use_future_t(const _ProtoAllocator& __a) noexcept : _M_alloc(__a) { }

      template<typename _OtherAllocator>
        use_future_t<_OtherAllocator>
        rebind(const _OtherAllocator& __a) const noexcept
        { return use_future_t<_OtherAllocator>(__a); }

      allocator_type get_allocator() const noexcept { return _M_alloc; }

      template<typename _Func>
        auto
        operator()(_Func&& __f) const
        {
          using _Token = __use_future_ct<decay_t<_Func>, _ProtoAllocator>;
          return _Token{ {std::forward<_Func>(__f), _M_alloc} };
        }

    private:
      _ProtoAllocator _M_alloc;
    };

  template<typename _Tp>
    class use_future_t<std::allocator<_Tp>>
    {
    public:
      // use_future_t types:
      using allocator_type = std::allocator<_Tp>;

      // use_future_t members:
      constexpr use_future_t() noexcept = default;

      explicit
      use_future_t(const allocator_type& __a) noexcept { }

      template<typename _Up>
        use_future_t<std::allocator<_Up>>
        rebind(const std::allocator<_Up>& __a) const noexcept
        { return use_future_t<std::allocator<_Up>>(__a); }

      allocator_type get_allocator() const noexcept { return {}; }

      template<typename _Func>
        auto
        operator()(_Func&& __f) const
        {
          using _Token = __use_future_ct<decay_t<_Func>, allocator_type>;
          return _Token{std::forward<_Func>(__f)};
        }
    };

  constexpr use_future_t<> use_future = use_future_t<>();

  template<typename _Func, typename _Alloc, typename _Res, typename... _Args>
    class async_result<__use_future_ct<_Func, _Alloc>, _Res(_Args...)>;

  template<typename _Result, typename _Executor>
    struct __use_future_ex;

  // Completion handler for asynchronous operations initiated with use_future.
  template<typename _Func, typename... _Args>
    struct __use_future_ch
    {
      template<typename _Alloc>
        explicit
        __use_future_ch(__use_future_ct<_Func, _Alloc>&& __token)
        : _M_f{ std::move(std::get<0>(__token._M_t)) },
          _M_promise{ std::get<1>(__token._M_t) }
        { }

      template<typename _Tp>
        explicit
        __use_future_ch(__use_future_ct<_Func, std::allocator<_Tp>>&& __token)
        : _M_f{ std::move(__token._M_f) }
        { }

      void
      operator()(_Args&&... __args)
      {
        __try
          {
            _M_promise.set_value(_M_f(std::forward<_Args>(__args)...));
          }
        __catch(__cxxabiv1::__forced_unwind&)
          {
            __throw_exception_again;
          }
        __catch(...)
          {
            _M_promise.set_exception(std::current_exception());
          }
      }

      using __result = result_of_t<_Func(decay_t<_Args>...)>;

      future<__result> get_future() { return _M_promise.get_future(); }

    private:
      template<typename, typename> friend struct __use_future_ex;

      _Func _M_f;
      mutable promise<__result> _M_promise;
    };

  // Specialization of async_result for operations initiated with use_future.
  template<typename _Func, typename _Alloc, typename _Res, typename... _Args>
    class async_result<__use_future_ct<_Func, _Alloc>, _Res(_Args...)>
    {
    public:
      using completion_handler_type = __use_future_ch<_Func, _Args...>;
      using return_type = future<typename completion_handler_type::__result>;

      explicit
      async_result(completion_handler_type& __h)
      : _M_future(__h.get_future())
      { }

      async_result(const async_result&) = delete;
      async_result& operator=(const async_result&) = delete;

      return_type get() { return std::move(_M_future); }

    private:
      return_type _M_future;
    };

  template<typename _Result, typename _Executor>
    struct __use_future_ex
    {
      template<typename _Handler>
        __use_future_ex(const _Handler& __h, _Executor __ex)
        : _M_t(__h._M_promise, __ex)
        { }

      template<typename _Fn>
        void
        dispatch(_Fn&& __fn)
        {
          __try
            {
              std::get<1>(_M_t).dispatch(std::forward<_Fn>(__fn));
            }
          __catch(__cxxabiv1::__forced_unwind&)
            {
              __throw_exception_again;
            }
          __catch(...)
            {
              std::get<0>(_M_t).set_exception(std::current_exception());
            }
        }

      template<typename _Fn>
        void
        post(_Fn&& __fn)
        {
          __try
            {
              std::get<1>(_M_t).post(std::forward<_Fn>(__fn));
            }
          __catch(__cxxabiv1::__forced_unwind&)
            {
              __throw_exception_again;
            }
          __catch(...)
            {
              std::get<0>(_M_t).set_exception(std::current_exception());
            }
        }

      template<typename _Fn>
        void
        defer(_Fn&& __fn)
        {
          __try
            {
              std::get<1>(_M_t).defer(std::forward<_Fn>(__fn));
            }
          __catch(__cxxabiv1::__forced_unwind&)
            {
              __throw_exception_again;
            }
          __catch(...)
            {
              std::get<0>(_M_t).set_exception(std::current_exception());
            }
        }

    private:
      tuple<promise<_Result>&, _Executor> _M_t;
    };

  template<typename _Func, typename _Executor, typename... _Args>
    struct associated_executor<__use_future_ch<_Func, _Args...>, _Executor>
    {
    private:
      using __handler = __use_future_ch<_Func, _Args...>;

      using type = __use_future_ex<typename __handler::__result, _Executor>;

      static type
      get(const __handler& __h, const _Executor& __ex)
      { return { __h, __ex }; }
    };

#if 0
  // [async.use.future.traits]
  template<typename _Alloc, typename _Ret, typename... _Args>
    class handler_type<use_future_t<_Alloc>, _Ret(_Args...)> // TODO uglify name
    {
      template<typename... _Tp>
        struct __is_error_result : false_type { };

      template<typename... _Tp>
        struct __is_error_result<error_code, _Tp...> : true_type { };

      template<typename... _Tp>
        struct __is_error_result<exception_ptr, _Tp...> : true_type { };

      static exception_ptr
      _S_exptr(exception_ptr& __ex)
      { return std::move(__ex); }

      static exception_ptr
      _S_exptr(const error_code& __ec)
      { return make_exception_ptr(system_error(__ec)); }

      template<bool _IsError, typename... _UArgs>
        struct _Type;

      // N == 0
      template<bool _IsError>
        struct _Type<_IsError>
        {
          std::promise<void> _M_promise;

          void operator()() { _M_promise.set_value(); }
        };

      // N == 1, U0 is error_code or exception_ptr
      template<typename _UArg0>
        struct _Type<true, _UArg0>
        {
          std::promise<void> _M_promise;

          template<typename _Arg0>
            void
            operator()(_Arg0&& __a0)
            {
              if (__a0)
                _M_promise.set_exception(_S_exptr(__a0));
              else
                _M_promise.set_value();
            }
        };

      // N == 1, U0 is not error_code or exception_ptr
      template<typename _UArg0>
        struct _Type<false, _UArg0>
        {
          std::promise<_UArg0> _M_promise;

          template<typename _Arg0>
            void
            operator()(_Arg0&& __a0)
            { _M_promise.set_value(std::forward<_Arg0>(__a0)); }
        };

      // N == 2, U0 is error_code or exception_ptr
      template<typename _UArg0, typename _UArg1>
        struct _Type<true, _UArg0, _UArg1>
        {
          std::promise<_UArg1> _M_promise;

          template<typename _Arg0, typename _Arg1>
            void
            operator()(_Arg0&& __a0, _Arg1&& __a1)
            {
              if (__a0)
                _M_promise.set_exception(_S_exptr(__a0));
              else
                _M_promise.set_value(std::forward<_Arg1>(__a1));
            }
        };

      // N >= 2, U0 is not error_code or exception_ptr
      template<typename... _UArgs>
        struct _Type<false, _UArgs...>
        {
          static_assert(sizeof...(_UArgs) > 1, "wrong partial specialization");

          std::promise<tuple<_UArgs...>> _M_promise;
          template<typename... _Args2>
            void
            operator()(_Args2&&... __args)
            {
              _M_promise.set_value(
                  std::forward_as_tuple(std::forward<_Args2>(__args)...));
            }
        };

      // N > 2, U0 is error_code or exception_ptr
      template<typename _UArg0, typename... _UArgs>
        struct _Type<true, _UArg0, _UArgs...>
        {
          static_assert(sizeof...(_UArgs) > 1, "wrong partial specialization");

          std::promise<tuple<_UArgs...>> _M_promise;

          template<typename _Arg0, typename... _Args2>
            void
            operator()(_Arg0&& __a0, _Args2&&... __args)
            {
              if (__a0)
                _M_promise.set_exception(_S_exptr(__a0));
              else
                _M_promise.set_value(
                    std::forward_as_tuple(std::forward<_Args2>(__args)...));
            }
        };

    public:
      using type = _Type<__is_error_result<_Args...>::value,
                         decay_t<_Args>...>;
    };

  template<typename _Alloc, typename _Ret, typename... _Args>
    struct async_result<use_future_t<_Alloc>, _Ret(_Args...)>
    {
      using completion_handler_type
        = typename handler_type<use_future_t<_Alloc>, _Ret(_Args...)>::type;
      using return_type = void; // XXX TODO ???;

      explicit
      async_result(completion_handler_type& __h) : _M_handler(__h) { }

      auto get() { return _M_handler._M_provider.get_future(); }

      async_result(const async_result&) = delete;
      async_result& operator=(const async_result&) = delete;

      return_type get() { return _M_handler._M_promise.get_future(); }

    private:
      completion_handler_type& _M_handler;
    };

  // TODO specialize associated_executor for
  // async_result<use_future_t<A>, Sig>::completion_handler_type
  // to use a __use_future_ex
  // (probably need to move _Type outside of handler_type so we don't have
  // a non-deduced context)
#endif

  // [async.packaged.task.specializations]
  template<typename _Ret, typename... _Args, typename _Signature>
    class async_result<packaged_task<_Ret(_Args...)>, _Signature>
    {
    public:
      using completion_handler_type = packaged_task<_Ret(_Args...)>;
      using return_type = future<_Ret>;

      explicit
      async_result(completion_handler_type& __h)
      : _M_future(__h.get_future())
      { }

      async_result(const async_result&) = delete;
      async_result& operator=(const async_result&) = delete;

      return_type get() { return std::move(_M_future); }

    private:
      return_type _M_future;
    };

#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1

  /// @}

} // namespace v1
} // namespace net
} // namespace experimental

  template<typename _Alloc>
    struct uses_allocator<experimental::net::v1::executor, _Alloc>
    : true_type {};

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // C++14

#endif // _GLIBCXX_EXPERIMENTAL_EXECUTOR