// boost/cobalt/detail/gather.hpp
  1. //
  2. // Copyright (c) 2022 Klemens Morgenstern (klemens.morgenstern@gmx.net)
  3. //
  4. // Distributed under the Boost Software License, Version 1.0. (See accompanying
  5. // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. #ifndef BOOST_COBALT_DETAIL_GATHER_HPP
  8. #define BOOST_COBALT_DETAIL_GATHER_HPP
  9. #include <boost/cobalt/detail/await_result_helper.hpp>
  10. #include <boost/cobalt/detail/exception.hpp>
  11. #include <boost/cobalt/detail/fork.hpp>
  12. #include <boost/cobalt/detail/forward_cancellation.hpp>
  13. #include <boost/cobalt/detail/util.hpp>
  14. #include <boost/cobalt/detail/wrapper.hpp>
  15. #include <boost/cobalt/task.hpp>
  16. #include <boost/cobalt/this_thread.hpp>
  17. #include <boost/asio/associated_cancellation_slot.hpp>
  18. #include <boost/asio/bind_cancellation_slot.hpp>
  19. #include <boost/asio/cancellation_signal.hpp>
  20. #include <boost/core/ignore_unused.hpp>
  21. #include <boost/intrusive_ptr.hpp>
  22. #include <boost/system/result.hpp>
  23. #include <boost/variant2/variant.hpp>
  24. #include <array>
  25. #include <coroutine>
  26. #include <algorithm>
  27. namespace boost::cobalt::detail
  28. {
  29. template<typename ... Args>
  30. struct gather_variadic_impl
  31. {
  32. using tuple_type = std::tuple<decltype(get_awaitable_type(std::declval<Args&&>()))...>;
  33. gather_variadic_impl(Args && ... args)
  34. : args{std::forward<Args>(args)...}
  35. {
  36. }
  37. std::tuple<Args...> args;
  38. constexpr static std::size_t tuple_size = sizeof...(Args);
  39. struct awaitable : fork::static_shared_state<256 * tuple_size>
  40. {
  41. template<std::size_t ... Idx>
  42. awaitable(std::tuple<Args...> & args, std::index_sequence<Idx...>)
  43. : aws(awaitable_type_getter<Args>(std::get<Idx>(args))...)
  44. {
  45. }
  46. tuple_type aws;
  47. std::array<asio::cancellation_signal, tuple_size> cancel;
  48. template<typename T>
  49. using result_store_part = variant2::variant<
  50. variant2::monostate,
  51. void_as_monostate<co_await_result_t<T>>,
  52. std::exception_ptr>;
  53. std::tuple<result_store_part<Args>...> result;
  54. template<std::size_t Idx>
  55. void interrupt_await_step()
  56. {
  57. using type= std::tuple_element_t<Idx, std::tuple<Args...>>;
  58. using t = std::conditional_t<
  59. std::is_reference_v<std::tuple_element_t<Idx, decltype(aws)>>,
  60. co_awaitable_type<type> &,
  61. co_awaitable_type<type> &&>;
  62. if constexpr (interruptible<t>)
  63. static_cast<t>(std::get<Idx>(aws)).interrupt_await();
  64. }
  65. void interrupt_await()
  66. {
  67. mp11::mp_for_each<mp11::mp_iota_c<sizeof...(Args)>>
  68. ([&](auto idx)
  69. {
  70. interrupt_await_step<idx>();
  71. });
  72. }
  73. // GCC doesn't like member funs
  74. template<std::size_t Idx>
  75. static detail::fork await_impl(awaitable & this_)
  76. try
  77. {
  78. auto & aw = std::get<Idx>(this_.aws);
  79. // check manually if we're ready
  80. auto rd = aw.await_ready();
  81. if (!rd)
  82. {
  83. co_await this_.cancel[Idx].slot();
  84. // make sure the executor is set
  85. co_await detail::fork::wired_up;
  86. // do the await - this doesn't call await-ready again
  87. if constexpr (std::is_void_v<decltype(aw.await_resume())>)
  88. {
  89. co_await aw;
  90. std::get<Idx>(this_.result).template emplace<1u>();
  91. }
  92. else
  93. std::get<Idx>(this_.result).template emplace<1u>(co_await aw);
  94. }
  95. else
  96. {
  97. if constexpr (std::is_void_v<decltype(aw.await_resume())>)
  98. {
  99. aw.await_resume();
  100. std::get<Idx>(this_.result).template emplace<1u>();
  101. }
  102. else
  103. std::get<Idx>(this_.result).template emplace<1u>(aw.await_resume());
  104. }
  105. }
  106. catch(...)
  107. {
  108. std::get<Idx>(this_.result).template emplace<2u>(std::current_exception());
  109. }
  110. std::array<detail::fork(*)(awaitable&), tuple_size> impls {
  111. []<std::size_t ... Idx>(std::index_sequence<Idx...>)
  112. {
  113. return std::array<detail::fork(*)(awaitable&), tuple_size>{&await_impl<Idx>...};
  114. }(std::make_index_sequence<tuple_size>{})
  115. };
  116. detail::fork last_forked;
  117. std::size_t last_index = 0u;
  118. bool await_ready()
  119. {
  120. while (last_index < tuple_size)
  121. {
  122. last_forked = impls[last_index++](*this);
  123. if (!last_forked.done())
  124. return false; // one coro didn't immediately complete!
  125. }
  126. last_forked.release();
  127. return true;
  128. }
  129. template<typename H>
  130. auto await_suspend(
  131. std::coroutine_handle<H> h
  132. #if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
  133. , const boost::source_location & loc = BOOST_CURRENT_LOCATION
  134. #endif
  135. )
  136. {
  137. #if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
  138. this->loc = loc;
  139. #endif
  140. this->exec = &cobalt::detail::get_executor(h);
  141. last_forked.release().resume();
  142. while (last_index < tuple_size)
  143. impls[last_index++](*this).release();
  144. if (!this->outstanding_work()) // already done, resume rightaway.
  145. return false;
  146. // arm the cancel
  147. assign_cancellation(
  148. h,
  149. [&](asio::cancellation_type ct)
  150. {
  151. for (auto & cs : cancel)
  152. cs.emit(ct);
  153. });
  154. this->coro.reset(h.address());
  155. return true;
  156. }
  157. template<typename T>
  158. using result_part = system::result<co_await_result_t<T>, std::exception_ptr>;
  159. #if _MSC_VER
  160. BOOST_NOINLINE
  161. #endif
  162. std::tuple<result_part<Args> ...> await_resume()
  163. {
  164. return mp11::tuple_transform(
  165. []<typename T>(variant2::variant<variant2::monostate, T, std::exception_ptr> & var)
  166. -> system::result<monostate_as_void<T>, std::exception_ptr>
  167. {
  168. BOOST_ASSERT(var.index() != 0u);
  169. if (var.index() == 1u)
  170. {
  171. if constexpr (std::is_same_v<T, variant2::monostate>)
  172. return {system::in_place_value};
  173. else
  174. return {system::in_place_value, std::move(get<1>(var))};
  175. }
  176. else
  177. return {system::in_place_error, std::move(get<2>(var))};
  178. }
  179. , result);
  180. }
  181. };
  182. awaitable operator co_await() &&
  183. {
  184. return awaitable(args, std::make_index_sequence<sizeof...(Args)>{});
  185. }
  186. };
  187. template<typename Range>
  188. struct gather_ranged_impl
  189. {
  190. Range aws;
  191. using result_type = system::result<
  192. co_await_result_t<std::decay_t<decltype(*std::begin(std::declval<Range>()))>>,
  193. std::exception_ptr>;
  194. using result_storage_type = variant2::variant<
  195. variant2::monostate,
  196. void_as_monostate<
  197. co_await_result_t<std::decay_t<decltype(*std::begin(std::declval<Range>()))>>
  198. >,
  199. std::exception_ptr>;
  200. struct awaitable : fork::shared_state
  201. {
  202. using type = std::decay_t<decltype(*std::begin(std::declval<Range>()))>;
  203. #if !defined(BOOST_COBALT_NO_PMR)
  204. pmr::polymorphic_allocator<void> alloc{&resource};
  205. std::conditional_t<awaitable_type<type>, Range &,
  206. pmr::vector<co_awaitable_type<type>>> aws;
  207. pmr::vector<bool> ready{std::size(aws), alloc};
  208. pmr::vector<asio::cancellation_signal> cancel{std::size(aws), alloc};
  209. pmr::vector<result_storage_type> result{cancel.size(), alloc};
  210. #else
  211. std::allocator<void> alloc{};
  212. std::conditional_t<awaitable_type<type>, Range &,
  213. std::vector<co_awaitable_type<type>>> aws;
  214. std::vector<bool> ready{std::size(aws), alloc};
  215. std::vector<asio::cancellation_signal> cancel{std::size(aws), alloc};
  216. std::vector<result_storage_type> result{cancel.size(), alloc};
  217. #endif
  218. awaitable(Range & aws_, std::false_type /* needs operator co_await */)
  219. : fork::shared_state((512 + sizeof(co_awaitable_type<type>)) * std::size(aws_))
  220. , aws{alloc}
  221. , ready{std::size(aws_), alloc}
  222. , cancel{std::size(aws_), alloc}
  223. {
  224. aws.reserve(std::size(aws_));
  225. for (auto && a : aws_)
  226. {
  227. using a_0 = std::decay_t<decltype(a)>;
  228. using a_t = std::conditional_t<
  229. std::is_lvalue_reference_v<Range>, a_0 &, a_0 &&>;
  230. aws.emplace_back(awaitable_type_getter<a_t>(static_cast<a_t>(a)));
  231. }
  232. std::transform(std::begin(this->aws),
  233. std::end(this->aws),
  234. std::begin(ready),
  235. [](auto & aw) {return aw.await_ready();});
  236. }
  237. awaitable(Range & aws, std::true_type /* needs operator co_await */)
  238. : fork::shared_state((512 + sizeof(co_awaitable_type<type>)) * std::size(aws))
  239. , aws(aws)
  240. {
  241. std::transform(std::begin(aws), std::end(aws), std::begin(ready), [](auto & aw) {return aw.await_ready();});
  242. }
  243. awaitable(Range & aws)
  244. : awaitable(aws, std::bool_constant<awaitable_type<type>>{})
  245. {
  246. }
  247. void interrupt_await()
  248. {
  249. using t = std::conditional_t<std::is_reference_v<Range>,
  250. co_awaitable_type<type> &,
  251. co_awaitable_type<type> &&>;
  252. if constexpr (interruptible<t>)
  253. for (auto & aw : aws)
  254. static_cast<t>(aw).interrupt_await();
  255. }
  256. static detail::fork await_impl(awaitable & this_, std::size_t idx)
  257. try
  258. {
  259. auto & aw = *std::next(std::begin(this_.aws), idx);
  260. auto rd = aw.await_ready();
  261. if (!rd)
  262. {
  263. co_await this_.cancel[idx].slot();
  264. co_await detail::fork::wired_up;
  265. if constexpr (std::is_void_v<decltype(aw.await_resume())>)
  266. {
  267. co_await aw;
  268. this_.result[idx].template emplace<1u>();
  269. }
  270. else
  271. this_.result[idx].template emplace<1u>(co_await aw);
  272. }
  273. else
  274. {
  275. if constexpr (std::is_void_v<decltype(aw.await_resume())>)
  276. {
  277. aw.await_resume();
  278. this_.result[idx].template emplace<1u>();
  279. }
  280. else
  281. this_.result[idx].template emplace<1u>(aw.await_resume());
  282. }
  283. }
  284. catch(...)
  285. {
  286. this_.result[idx].template emplace<2u>(std::current_exception());
  287. }
  288. detail::fork last_forked;
  289. std::size_t last_index = 0u;
  290. bool await_ready()
  291. {
  292. while (last_index < cancel.size())
  293. {
  294. last_forked = await_impl(*this, last_index++);
  295. if (!last_forked.done())
  296. return false; // one coro didn't immediately complete!
  297. }
  298. last_forked.release();
  299. return true;
  300. }
  301. template<typename H>
  302. auto await_suspend(
  303. std::coroutine_handle<H> h
  304. #if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
  305. , const boost::source_location & loc = BOOST_CURRENT_LOCATION
  306. #endif
  307. )
  308. {
  309. #if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
  310. this->loc = loc;
  311. #endif
  312. exec = &detail::get_executor(h);
  313. last_forked.release().resume();
  314. while (last_index < cancel.size())
  315. await_impl(*this, last_index++).release();
  316. if (!this->outstanding_work()) // already done, resume rightaway.
  317. return false;
  318. // arm the cancel
  319. assign_cancellation(
  320. h,
  321. [&](asio::cancellation_type ct)
  322. {
  323. for (auto & cs : cancel)
  324. cs.emit(ct);
  325. });
  326. this->coro.reset(h.address());
  327. return true;
  328. }
  329. #if _MSC_VER
  330. BOOST_NOINLINE
  331. #endif
  332. auto await_resume()
  333. {
  334. #if !defined(BOOST_COBALT_NO_PMR)
  335. pmr::vector<result_type> res{result.size(), this_thread::get_allocator()};
  336. #else
  337. std::vector<result_type> res(result.size());
  338. #endif
  339. std::transform(
  340. result.begin(), result.end(), res.begin(),
  341. [](result_storage_type & res) -> result_type
  342. {
  343. BOOST_ASSERT(res.index() != 0u);
  344. if (res.index() == 1u)
  345. {
  346. if constexpr (std::is_void_v<typename result_type::value_type>)
  347. return system::in_place_value;
  348. else
  349. return {system::in_place_value, std::move(get<1u>(res))};
  350. }
  351. else
  352. return {system::in_place_error, get<2u>(res)};
  353. });
  354. return res;
  355. }
  356. };
  357. awaitable operator co_await() && {return awaitable{aws};}
  358. };
  359. }
  360. #endif //BOOST_COBALT_DETAIL_GATHER_HPP