connection_base.hpp

/* Copyright (c) 2018-2023 Marcelo Zimbres Silva (mzimbres@gmail.com)
 *
 * Distributed under the Boost Software License, Version 1.0. (See
 * accompanying file LICENSE.txt)
 */

#ifndef BOOST_REDIS_CONNECTION_BASE_HPP
#define BOOST_REDIS_CONNECTION_BASE_HPP

#include <boost/redis/adapter/adapt.hpp>
#include <boost/redis/detail/helper.hpp>
#include <boost/redis/error.hpp>
#include <boost/redis/operation.hpp>
#include <boost/redis/request.hpp>
#include <boost/redis/resp3/type.hpp>
#include <boost/redis/config.hpp>
#include <boost/redis/detail/runner.hpp>
#include <boost/redis/usage.hpp>
#include <boost/system.hpp>
#include <boost/asio/basic_stream_socket.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/experimental/parallel_group.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/write.hpp>
#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/experimental/channel.hpp>

#include <algorithm>
#include <array>
#include <chrono>
#include <deque>
#include <memory>
#include <string_view>
#include <type_traits>
#include <functional>

namespace boost::redis::detail
{

template <class DynamicBuffer>
std::string_view buffer_view(DynamicBuffer buf) noexcept
{
   char const* start = static_cast<char const*>(buf.data(0, buf.size()).data());
   return std::string_view{start, std::size(buf)};
}
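
// Composed operation used by the reader to append more data to the
// read buffer: it grows the dynamic buffer by `size`, performs a
// single async_read_some into the newly grown region and then shrinks
// off whatever part of that region was not filled.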
template <class AsyncReadStream, class DynamicBuffer>
class append_some_op {
private:
   AsyncReadStream& stream_;
   DynamicBuffer buf_;
   std::size_t size_ = 0;
   std::size_t tmp_ = 0;
   asio::coroutine coro_{};

public:
   append_some_op(AsyncReadStream& stream, DynamicBuffer buf, std::size_t size)
   : stream_ {stream}
   , buf_ {std::move(buf)}
   , size_{size}
   { }

   template <class Self>
   void operator()( Self& self
                  , system::error_code ec = {}
                  , std::size_t n = 0)
   {
      BOOST_ASIO_CORO_REENTER (coro_)
      {
         tmp_ = buf_.size();
         buf_.grow(size_);

         BOOST_ASIO_CORO_YIELD
         stream_.async_read_some(buf_.data(tmp_, size_), std::move(self));
         if (ec) {
            self.complete(ec, 0);
            return;
         }

         buf_.shrink(buf_.size() - tmp_ - n);
         self.complete({}, n);
      }
   }
};

template <class AsyncReadStream, class DynamicBuffer, class CompletionToken>
auto
async_append_some(
   AsyncReadStream& stream,
   DynamicBuffer buffer,
   std::size_t size,
   CompletionToken&& token)
{
   return asio::async_compose
      < CompletionToken
      , void(system::error_code, std::size_t)
      >(append_some_op<AsyncReadStream, DynamicBuffer> {stream, buffer, size}, token, stream);
}
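
// Composed operation behind connection_base::async_exec. It enqueues
// the request, suspends on the per-request timer until the reader has
// parsed the corresponding response (or the request has been stopped)
// and then completes with the error code and the number of bytes read.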
template <class Conn>
struct exec_op {
   using req_info_type = typename Conn::req_info;
   using adapter_type = typename Conn::adapter_type;

   Conn* conn_ = nullptr;
   std::shared_ptr<req_info_type> info_ = nullptr;
   asio::coroutine coro{};

   template <class Self>
   void operator()(Self& self , system::error_code ec = {})
   {
      BOOST_ASIO_CORO_REENTER (coro)
      {
         // Check whether the user wants to wait for the connection to
         // be established.
         if (info_->req_->get_config().cancel_if_not_connected && !conn_->is_open()) {
            BOOST_ASIO_CORO_YIELD
            asio::post(std::move(self));
            return self.complete(error::not_connected, 0);
         }

         conn_->add_request_info(info_);

EXEC_OP_WAIT:
         BOOST_ASIO_CORO_YIELD
         info_->async_wait(std::move(self));
         BOOST_ASSERT(ec == asio::error::operation_aborted);

         if (info_->ec_) {
            self.complete(info_->ec_, 0);
            return;
         }

         if (info_->stop_requested()) {
            // No need to call remove_request as the request has
            // already been removed by cancel(exec).
            return self.complete(ec, 0);
         }

         if (is_cancelled(self)) {
            if (info_->is_written()) {
               using c_t = asio::cancellation_type;
               auto const c = self.get_cancellation_state().cancelled();
               if ((c & c_t::terminal) != c_t::none) {
                  // Cancellation requires closing the connection,
                  // otherwise it stays in an inconsistent state.
                  conn_->cancel(operation::run);
                  return self.complete(ec, 0);
               } else {
                  // Can't implement other cancellation types, ignoring.
                  self.get_cancellation_state().clear();

                  // TODO: Find a better way to ignore the
                  // cancellation.
                  goto EXEC_OP_WAIT;
               }
            } else {
               // Cancellation can be honored.
               conn_->remove_request(info_);
               self.complete(ec, 0);
               return;
            }
         }

         self.complete(info_->ec_, info_->read_size_);
      }
   }
};
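
// Composed operation behind async_run_lean: it resets the connection
// state and runs the reader and writer operations in a parallel group,
// completing as soon as either of them finishes (wait_for_one).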
template <class Conn, class Logger>
struct run_op {
   Conn* conn = nullptr;
   Logger logger_;
   asio::coroutine coro{};

   template <class Self>
   void operator()( Self& self
                  , std::array<std::size_t, 2> order = {}
                  , system::error_code ec0 = {}
                  , system::error_code ec1 = {})
   {
      BOOST_ASIO_CORO_REENTER (coro)
      {
         conn->reset();

         BOOST_ASIO_CORO_YIELD
         asio::experimental::make_parallel_group(
            [this](auto token) { return conn->reader(logger_, token);},
            [this](auto token) { return conn->writer(logger_, token);}
         ).async_wait(
            asio::experimental::wait_for_one(),
            std::move(self));

         if (is_cancelled(self)) {
            logger_.trace("run-op: canceled. Exiting ...");
            self.complete(asio::error::operation_aborted);
            return;
         }

         logger_.on_run(ec0, ec1);

         switch (order[0]) {
            case 0: self.complete(ec0); break;
            case 1: self.complete(ec1); break;
            default: BOOST_ASSERT(false);
         }
      }
   }
};
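
// Composed operation implementing the write loop: while there are
// requests waiting to be written it coalesces them into a single
// payload and writes it to the (optionally TLS-wrapped) socket; when
// nothing is pending it suspends on writer_timer_ until a new request
// is added.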
template <class Conn, class Logger>
struct writer_op {
   Conn* conn_;
   Logger logger_;
   asio::coroutine coro{};

   template <class Self>
   void operator()( Self& self
                  , system::error_code ec = {}
                  , std::size_t n = 0)
   {
      ignore_unused(n);

      BOOST_ASIO_CORO_REENTER (coro) for (;;)
      {
         while (conn_->coalesce_requests()) {
            if (conn_->use_ssl())
               BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));
            else
               BOOST_ASIO_CORO_YIELD asio::async_write(conn_->next_layer().next_layer(), asio::buffer(conn_->write_buffer_), std::move(self));

            logger_.on_write(ec, conn_->write_buffer_);

            if (ec) {
               logger_.trace("writer-op: error. Exiting ...");
               conn_->cancel(operation::run);
               self.complete(ec);
               return;
            }

            if (is_cancelled(self)) {
               logger_.trace("writer-op: canceled. Exiting ...");
               self.complete(asio::error::operation_aborted);
               return;
            }

            conn_->on_write();

            // A socket.close() may have been called while a
            // successful write was already queued, so we have to
            // check here before proceeding.
            if (!conn_->is_open()) {
               logger_.trace("writer-op: canceled (2). Exiting ...");
               self.complete({});
               return;
            }
         }

         BOOST_ASIO_CORO_YIELD
         conn_->writer_timer_.async_wait(std::move(self));
         if (!conn_->is_open() || is_cancelled(self)) {
            logger_.trace("writer-op: canceled (3). Exiting ...");
            // Notice this is not an error of the op: stopping was
            // requested from the outside, so we complete with
            // success.
            self.complete({});
            return;
         }
      }
   }
};
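
// Composed operation implementing the read loop: it appends more data
// to the read buffer whenever the parser needs it, parses complete
// RESP3 messages and either delivers server pushes through the receive
// channel or wakes up the request that is waiting for the response.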
template <class Conn, class Logger>
struct reader_op {
   using parse_result = typename Conn::parse_result;
   using parse_ret_type = typename Conn::parse_ret_type;

   Conn* conn_;
   Logger logger_;
   parse_ret_type res_{parse_result::resp, 0};
   asio::coroutine coro{};

   template <class Self>
   void operator()( Self& self
                  , system::error_code ec = {}
                  , std::size_t n = 0)
   {
      ignore_unused(n);

      BOOST_ASIO_CORO_REENTER (coro) for (;;)
      {
         // Appends some data to the buffer if necessary.
         if ((res_.first == parse_result::needs_more) || std::empty(conn_->read_buffer_)) {
            if (conn_->use_ssl()) {
               BOOST_ASIO_CORO_YIELD
               async_append_some(
                  conn_->next_layer(),
                  conn_->dbuf_,
                  conn_->get_suggested_buffer_growth(),
                  std::move(self));
            } else {
               BOOST_ASIO_CORO_YIELD
               async_append_some(
                  conn_->next_layer().next_layer(),
                  conn_->dbuf_,
                  conn_->get_suggested_buffer_growth(),
                  std::move(self));
            }

            logger_.on_read(ec, n);

            // EOF is not treated as an error.
            if (ec == asio::error::eof) {
               logger_.trace("reader-op: EOF received. Exiting ...");
               conn_->cancel(operation::run);
               return self.complete({});
            }

            // The connection is not viable after an error.
            if (ec) {
               logger_.trace("reader-op: error. Exiting ...");
               conn_->cancel(operation::run);
               self.complete(ec);
               return;
            }

            // Somebody might have canceled implicitly or explicitly
            // while we were suspended and after queueing, so we have
            // to check.
            if (!conn_->is_open() || is_cancelled(self)) {
               logger_.trace("reader-op: canceled. Exiting ...");
               self.complete(ec);
               return;
            }
         }

         res_ = conn_->on_read(buffer_view(conn_->dbuf_), ec);
         if (ec) {
            logger_.trace("reader-op: parse error. Exiting ...");
            conn_->cancel(operation::run);
            self.complete(ec);
            return;
         }

         if (res_.first == parse_result::push) {
            if (!conn_->receive_channel_.try_send(ec, res_.second)) {
               BOOST_ASIO_CORO_YIELD
               conn_->receive_channel_.async_send(ec, res_.second, std::move(self));
            }

            if (ec) {
               logger_.trace("reader-op: error. Exiting ...");
               conn_->cancel(operation::run);
               self.complete(ec);
               return;
            }

            if (!conn_->is_open() || is_cancelled(self)) {
               logger_.trace("reader-op: canceled (2). Exiting ...");
               self.complete(asio::error::operation_aborted);
               return;
            }
         }
      }
   }
};
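
// Illustrative sketch only (not part of this header): the operations
// below are normally consumed through the public connection types,
// roughly as
//
//    boost::redis::request req;
//    req.push("PING");
//    boost::redis::response<std::string> resp;
//    conn.async_exec(req, resp, asio::detached);
//
// where `conn` is assumed to be a connection object that forwards
// async_run/async_exec to this class.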
/** @brief Base class for high level Redis asynchronous connections.
 *  @ingroup high-level-api
 *
 *  @tparam Executor The executor type.
 *
 */
template <class Executor>
class connection_base {
public:
   /// Executor type
   using executor_type = Executor;

   /// Type of the next layer
   using next_layer_type = asio::ssl::stream<asio::basic_stream_socket<asio::ip::tcp, Executor>>;

   using clock_type = std::chrono::steady_clock;
   using clock_traits_type = asio::wait_traits<clock_type>;
   using timer_type = asio::basic_waitable_timer<clock_type, clock_traits_type, executor_type>;

   using this_type = connection_base<Executor>;

   /// Constructs from an executor.
   connection_base(
      executor_type ex,
      asio::ssl::context::method method,
      std::size_t max_read_size)
   : ctx_{method}
   , stream_{std::make_unique<next_layer_type>(ex, ctx_)}
   , writer_timer_{ex}
   , receive_channel_{ex, 256}
   , runner_{ex, {}}
   , dbuf_{read_buffer_, max_read_size}
   {
      set_receive_response(ignore);
      writer_timer_.expires_at((std::chrono::steady_clock::time_point::max)());
   }

   /// Returns the ssl context.
   auto const& get_ssl_context() const noexcept
      { return ctx_;}

   /// Returns the ssl context.
   auto& get_ssl_context() noexcept
      { return ctx_;}

   /// Resets the underlying stream.
   void reset_stream()
   {
      stream_ = std::make_unique<next_layer_type>(writer_timer_.get_executor(), ctx_);
   }

   /// Returns a reference to the next layer.
   auto& next_layer() noexcept { return *stream_; }

   /// Returns a const reference to the next layer.
   auto const& next_layer() const noexcept { return *stream_; }

   /// Returns the associated executor.
   auto get_executor() {return writer_timer_.get_executor();}

   /// Cancels specific operations.
   void cancel(operation op)
   {
      runner_.cancel(op);
      if (op == operation::all) {
         cancel_impl(operation::run);
         cancel_impl(operation::receive);
         cancel_impl(operation::exec);
         return;
      }

      cancel_impl(op);
   }
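
   // Executes a request on this connection: the response is adapted
   // with boost_redis_adapt, wrapped in a req_info entry and driven to
   // completion by the exec_op composed operation defined above.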
   template <class Response, class CompletionToken>
   auto async_exec(request const& req, Response& resp, CompletionToken token)
   {
      using namespace boost::redis::adapter;
      auto f = boost_redis_adapt(resp);
      BOOST_ASSERT_MSG(req.get_expected_responses() <= f.get_supported_response_size(), "Request and response have incompatible sizes.");

      auto info = std::make_shared<req_info>(req, f, get_executor());

      return asio::async_compose
         < CompletionToken
         , void(system::error_code, std::size_t)
         >(exec_op<this_type>{this, info}, token, writer_timer_);
   }

   template <class Response, class CompletionToken>
   [[deprecated("Set the response with set_receive_response and use the other overload.")]]
   auto async_receive(Response& response, CompletionToken token)
   {
      set_receive_response(response);
      return receive_channel_.async_receive(std::move(token));
   }

   template <class CompletionToken>
   auto async_receive(CompletionToken token)
      { return receive_channel_.async_receive(std::move(token)); }

   std::size_t receive(system::error_code& ec)
   {
      std::size_t size = 0;

      auto f = [&](system::error_code const& ec2, std::size_t n)
      {
         ec = ec2;
         size = n;
      };

      auto const res = receive_channel_.try_receive(f);
      if (ec)
         return 0;

      if (!res)
         ec = error::sync_receive_push_failed;

      return size;
   }
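
   // Stores the configuration in the runner and delegates the run to
   // runner_.async_run, which performs connection setup and is
   // expected to drive the reader/writer loops via async_run_lean
   // below.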
   template <class Logger, class CompletionToken>
   auto async_run(config const& cfg, Logger l, CompletionToken token)
   {
      runner_.set_config(cfg);
      l.set_prefix(runner_.get_config().log_prefix);
      return runner_.async_run(*this, l, std::move(token));
   }

   template <class Response>
   void set_receive_response(Response& response)
   {
      using namespace boost::redis::adapter;
      auto g = boost_redis_adapt(response);
      receive_adapter_ = adapter::detail::make_adapter_wrapper(g);
   }

   usage get_usage() const noexcept
      { return usage_; }

private:
   using receive_channel_type = asio::experimental::channel<executor_type, void(system::error_code, std::size_t)>;
   using runner_type = runner<executor_type>;
   using adapter_type = std::function<void(std::size_t, resp3::basic_node<std::string_view> const&, system::error_code&)>;
   using receiver_adapter_type = std::function<void(resp3::basic_node<std::string_view> const&, system::error_code&)>;

   auto use_ssl() const noexcept
      { return runner_.get_config().use_ssl;}

   auto cancel_on_conn_lost() -> std::size_t
   {
      // Must return false if the request should be removed.
      auto cond = [](auto const& ptr)
      {
         BOOST_ASSERT(ptr != nullptr);

         if (ptr->is_written()) {
            return !ptr->req_->get_config().cancel_if_unresponded;
         } else {
            return !ptr->req_->get_config().cancel_on_connection_lost;
         }
      };

      auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), cond);

      auto const ret = std::distance(point, std::end(reqs_));

      std::for_each(point, std::end(reqs_), [](auto const& ptr) {
         ptr->stop();
      });

      reqs_.erase(point, std::end(reqs_));

      std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
         return ptr->reset_status();
      });

      return ret;
   }

   auto cancel_unwritten_requests() -> std::size_t
   {
      auto f = [](auto const& ptr)
      {
         BOOST_ASSERT(ptr != nullptr);
         return ptr->is_written();
      };

      auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), f);

      auto const ret = std::distance(point, std::end(reqs_));

      std::for_each(point, std::end(reqs_), [](auto const& ptr) {
         ptr->stop();
      });

      reqs_.erase(point, std::end(reqs_));
      return ret;
   }

   void cancel_impl(operation op)
   {
      switch (op) {
         case operation::exec:
         {
            cancel_unwritten_requests();
         } break;
         case operation::run:
         {
            close();
            writer_timer_.cancel();
            receive_channel_.cancel();
            cancel_on_conn_lost();
         } break;
         case operation::receive:
         {
            receive_channel_.cancel();
         } break;
         default: /* ignore */;
      }
   }
   void on_write()
   {
      // We have to clear the payload right after writing it so it can
      // be used as a flag that indicates there is no ongoing write.
      write_buffer_.clear();

      // Notice this must come before the for-each below.
      cancel_push_requests();

      // There is a small optimization possible here: traverse only the
      // partition of unwritten requests instead of all of them.
      std::for_each(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
         BOOST_ASSERT_MSG(ptr != nullptr, "Expects non-null pointer.");
         if (ptr->is_staged())
            ptr->mark_written();
      });
   }
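
   // Per-request bookkeeping shared between exec_op, the writer and
   // the reader. The timer is used as a one-shot event: exec_op waits
   // on it and proceed()/stop() cancel it to wake the waiter up once
   // the response has been read or the request has been aborted.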
   struct req_info {
   public:
      using node_type = resp3::basic_node<std::string_view>;
      using wrapped_adapter_type = std::function<void(node_type const&, system::error_code&)>;

      enum class action
      {
         stop,
         proceed,
         none,
      };

      explicit req_info(request const& req, adapter_type adapter, executor_type ex)
      : timer_{ex}
      , action_{action::none}
      , req_{&req}
      , adapter_{}
      , expected_responses_{req.get_expected_responses()}
      , status_{status::none}
      , ec_{{}}
      , read_size_{0}
      {
         timer_.expires_at((std::chrono::steady_clock::time_point::max)());

         adapter_ = [this, adapter](node_type const& nd, system::error_code& ec)
         {
            auto const i = req_->get_expected_responses() - expected_responses_;
            adapter(i, nd, ec);
         };
      }

      auto proceed()
      {
         timer_.cancel();
         action_ = action::proceed;
      }

      void stop()
      {
         timer_.cancel();
         action_ = action::stop;
      }

      [[nodiscard]] auto is_waiting_write() const noexcept
         { return !is_written() && !is_staged(); }

      [[nodiscard]] auto is_written() const noexcept
         { return status_ == status::written; }

      [[nodiscard]] auto is_staged() const noexcept
         { return status_ == status::staged; }

      void mark_written() noexcept
         { status_ = status::written; }

      void mark_staged() noexcept
         { status_ = status::staged; }

      void reset_status() noexcept
         { status_ = status::none; }

      [[nodiscard]] auto stop_requested() const noexcept
         { return action_ == action::stop;}

      template <class CompletionToken>
      auto async_wait(CompletionToken token)
      {
         return timer_.async_wait(std::move(token));
      }

   //private:
      enum class status
      { none
      , staged
      , written
      };

      timer_type timer_;
      action action_;
      request const* req_;
      wrapped_adapter_type adapter_;

      // Contains the number of commands that haven't been read yet.
      std::size_t expected_responses_;
      status status_;

      system::error_code ec_;
      std::size_t read_size_;
   };

   void remove_request(std::shared_ptr<req_info> const& info)
   {
      reqs_.erase(std::remove(std::begin(reqs_), std::end(reqs_), info));
   }

   using reqs_type = std::deque<std::shared_ptr<req_info>>;

   template <class, class> friend struct reader_op;
   template <class, class> friend struct writer_op;
   template <class, class> friend struct run_op;
   template <class> friend struct exec_op;
   template <class, class, class> friend struct run_all_op;
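
   // Completes and removes staged requests that expect no responses
   // (e.g. subscribe-only requests); they are finished right after
   // being written since the reader will never match a reply against
   // them.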
   void cancel_push_requests()
   {
      auto point = std::stable_partition(std::begin(reqs_), std::end(reqs_), [](auto const& ptr) {
         return !(ptr->is_staged() && ptr->req_->get_expected_responses() == 0);
      });

      std::for_each(point, std::end(reqs_), [](auto const& ptr) {
         ptr->proceed();
      });

      reqs_.erase(point, std::end(reqs_));
   }

   [[nodiscard]] bool is_writing() const noexcept
   {
      return !write_buffer_.empty();
   }
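
   // Adds a new request to the queue. Requests flagged with hello
   // priority are moved in front of all other requests that are still
   // waiting to be written, and the writer is woken up (by cancelling
   // writer_timer_) when the connection is open and no write is in
   // progress.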
   void add_request_info(std::shared_ptr<req_info> const& info)
   {
      reqs_.push_back(info);

      if (info->req_->has_hello_priority()) {
         auto rend = std::partition_point(std::rbegin(reqs_), std::rend(reqs_), [](auto const& e) {
            return e->is_waiting_write();
         });

         std::rotate(std::rbegin(reqs_), std::rbegin(reqs_) + 1, rend);
      }

      if (is_open() && !is_writing())
         writer_timer_.cancel();
   }

   template <class CompletionToken, class Logger>
   auto reader(Logger l, CompletionToken&& token)
   {
      return asio::async_compose
         < CompletionToken
         , void(system::error_code)
         >(reader_op<this_type, Logger>{this, l}, token, writer_timer_);
   }

   template <class CompletionToken, class Logger>
   auto writer(Logger l, CompletionToken&& token)
   {
      return asio::async_compose
         < CompletionToken
         , void(system::error_code)
         >(writer_op<this_type, Logger>{this, l}, token, writer_timer_);
   }

   template <class Logger, class CompletionToken>
   auto async_run_lean(config const& cfg, Logger l, CompletionToken token)
   {
      runner_.set_config(cfg);
      l.set_prefix(runner_.get_config().log_prefix);
      return asio::async_compose
         < CompletionToken
         , void(system::error_code)
         >(run_op<this_type, Logger>{this, l}, token, writer_timer_);
   }

   [[nodiscard]] bool coalesce_requests()
   {
      // Coalesces the requests and marks them staged. After a
      // successful write staged requests will be marked as written.
      auto const point = std::partition_point(std::cbegin(reqs_), std::cend(reqs_), [](auto const& ri) {
         return !ri->is_waiting_write();
      });

      std::for_each(point, std::cend(reqs_), [this](auto const& ri) {
         // Stage the request.
         write_buffer_ += ri->req_->payload();
         ri->mark_staged();
         usage_.commands_sent += ri->expected_responses_;
      });

      usage_.bytes_sent += std::size(write_buffer_);

      return point != std::cend(reqs_);
   }
   bool is_waiting_response() const noexcept
   {
      return !std::empty(reqs_) && reqs_.front()->is_written();
   }

   void close()
   {
      if (stream_->next_layer().is_open()) {
         system::error_code ec;
         stream_->next_layer().close(ec);
      }
   }

   auto is_open() const noexcept { return stream_->next_layer().is_open(); }
   auto& lowest_layer() noexcept { return stream_->lowest_layer(); }

   auto is_next_push()
   {
      // We treat a message as an unsolicited event in the following
      // cases:
      //
      // 1. Its resp3 type is a push.
      //
      // 2. A non-push type is received with an empty request
      //    queue. I have noticed this is possible (e.g. -MISCONF).
      //    I would expect them to have type push so that we can
      //    distinguish them from responses to commands, but they
      //    arrive as a simple-error. If we are lucky enough to
      //    receive them when the command queue is empty we can treat
      //    them as server pushes, otherwise it is impossible to
      //    handle them properly.
      //
      // 3. The request does not expect any response but we got
      //    one. This may happen if, for example, subscribe is used
      //    with the wrong syntax.
      //
      // Useful links:
      //
      // - https://github.com/redis/redis/issues/11784
      // - https://github.com/redis/redis/issues/6426
      //
      BOOST_ASSERT(!read_buffer_.empty());

      return
         (resp3::to_type(read_buffer_.front()) == resp3::type::push)
         || reqs_.empty()
         || (!reqs_.empty() && reqs_.front()->expected_responses_ == 0)
         || !is_waiting_response(); // Added to deal with MONITOR.
   }

   auto get_suggested_buffer_growth() const noexcept
   {
      return parser_.get_suggested_buffer_growth(4096);
   }
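
   // Result of parsing one chunk of input: needs_more when the current
   // message is still incomplete, push for a server push and resp for
   // the response to a pending request. The second member of
   // parse_ret_type is the number of bytes consumed from the buffer.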
   enum class parse_result { needs_more, push, resp };

   using parse_ret_type = std::pair<parse_result, std::size_t>;

   parse_ret_type on_finish_parsing(parse_result t)
   {
      if (t == parse_result::push) {
         usage_.pushes_received += 1;
         usage_.push_bytes_received += parser_.get_consumed();
      } else {
         usage_.responses_received += 1;
         usage_.response_bytes_received += parser_.get_consumed();
      }

      on_push_ = false;
      dbuf_.consume(parser_.get_consumed());
      auto const res = std::make_pair(t, parser_.get_consumed());
      parser_.reset();
      return res;
   }

   parse_ret_type on_read(std::string_view data, system::error_code& ec)
   {
      // We arrive here in two states:
      //
      // 1. While we are parsing a message. In this case we don't want
      //    to determine the type of the message in the buffer
      //    (i.e. response vs push) but leave it untouched until the
      //    parsing of a complete message ends.
      //
      // 2. On a new message, in which case we have to determine
      //    whether the next message is a push or a response.
      //
      if (!on_push_) // Prepare for new message.
         on_push_ = is_next_push();

      if (on_push_) {
         if (!resp3::parse(parser_, data, receive_adapter_, ec))
            return std::make_pair(parse_result::needs_more, 0);

         if (ec)
            return std::make_pair(parse_result::push, 0);

         return on_finish_parsing(parse_result::push);
      }

      BOOST_ASSERT_MSG(is_waiting_response(), "Not waiting for a response (using MONITOR command perhaps?)");
      BOOST_ASSERT(!reqs_.empty());
      BOOST_ASSERT(reqs_.front() != nullptr);
      BOOST_ASSERT(reqs_.front()->expected_responses_ != 0);

      if (!resp3::parse(parser_, data, reqs_.front()->adapter_, ec))
         return std::make_pair(parse_result::needs_more, 0);

      if (ec) {
         reqs_.front()->ec_ = ec;
         reqs_.front()->proceed();
         return std::make_pair(parse_result::resp, 0);
      }

      reqs_.front()->read_size_ += parser_.get_consumed();

      if (--reqs_.front()->expected_responses_ == 0) {
         // Done with this request.
         reqs_.front()->proceed();
         reqs_.pop_front();
      }

      return on_finish_parsing(parse_result::resp);
   }

   void reset()
   {
      write_buffer_.clear();
      read_buffer_.clear();
      parser_.reset();
      on_push_ = false;
   }
   asio::ssl::context ctx_;
   std::unique_ptr<next_layer_type> stream_;

   // Notice we use a timer to simulate a condition variable. It is
   // more suitable than a channel here because the notify operation
   // does not suspend.
   timer_type writer_timer_;
   receive_channel_type receive_channel_;
   runner_type runner_;
   receiver_adapter_type receive_adapter_;

   using dyn_buffer_type = asio::dynamic_string_buffer<char, std::char_traits<char>, std::allocator<char>>;

   std::string read_buffer_;
   dyn_buffer_type dbuf_;
   std::string write_buffer_;
   reqs_type reqs_;
   resp3::parser parser_{};
   bool on_push_ = false;
   usage usage_;
};

} // namespace boost::redis::detail

#endif // BOOST_REDIS_CONNECTION_BASE_HPP