//
// Copyright (c) 2016-2019 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//
#ifndef BOOST_BEAST_WEBSOCKET_IMPL_WRITE_HPP
#define BOOST_BEAST_WEBSOCKET_IMPL_WRITE_HPP

#include <boost/beast/core/async_base.hpp>
#include <boost/beast/core/bind_handler.hpp>
#include <boost/beast/core/buffer_traits.hpp>
#include <boost/beast/core/buffers_cat.hpp>
#include <boost/beast/core/buffers_prefix.hpp>
#include <boost/beast/core/buffers_range.hpp>
#include <boost/beast/core/buffers_suffix.hpp>
#include <boost/beast/core/flat_static_buffer.hpp>
#include <boost/beast/core/stream_traits.hpp>
#include <boost/beast/core/detail/bind_continuation.hpp>
#include <boost/beast/core/detail/clamp.hpp>
#include <boost/beast/core/detail/config.hpp>
#include <boost/beast/websocket/detail/frame.hpp>
#include <boost/beast/websocket/detail/mask.hpp>
#include <boost/beast/websocket/impl/stream_impl.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/assert.hpp>
#include <boost/config.hpp>
#include <boost/throw_exception.hpp>
#include <algorithm>
#include <memory>
  32. namespace boost {
  33. namespace beast {
  34. namespace websocket {
  35. template<class NextLayer, bool deflateSupported>
  36. template<class Handler, class Buffers>
  37. class stream<NextLayer, deflateSupported>::write_some_op
  38. : public beast::async_base<
  39. Handler, beast::executor_type<stream>>
  40. , public asio::coroutine
  41. {
  42. enum
  43. {
  44. do_nomask_nofrag,
  45. do_nomask_frag,
  46. do_mask_nofrag,
  47. do_mask_frag,
  48. do_deflate
  49. };
  50. boost::weak_ptr<impl_type> wp_;
  51. buffers_suffix<Buffers> cb_;
  52. detail::frame_header fh_;
  53. detail::prepared_key key_;
  54. std::size_t bytes_transferred_ = 0;
  55. std::size_t remain_;
  56. std::size_t in_;
  57. int how_;
  58. bool fin_;
  59. bool more_ = false; // for ubsan
  60. bool cont_ = false;
  61. public:
  62. static constexpr int id = 2; // for soft_mutex
  63. template<class Handler_>
  64. write_some_op(
  65. Handler_&& h,
  66. boost::shared_ptr<impl_type> const& sp,
  67. bool fin,
  68. Buffers const& bs)
  69. : beast::async_base<Handler,
  70. beast::executor_type<stream>>(
  71. std::forward<Handler_>(h),
  72. sp->stream().get_executor())
  73. , wp_(sp)
  74. , cb_(bs)
  75. , fin_(fin)
  76. {
  77. auto& impl = *sp;
  78. // Set up the outgoing frame header
  79. if(! impl.wr_cont)
  80. {
  81. impl.begin_msg();
  82. fh_.rsv1 = impl.wr_compress;
  83. }
  84. else
  85. {
  86. fh_.rsv1 = false;
  87. }
  88. fh_.rsv2 = false;
  89. fh_.rsv3 = false;
  90. fh_.op = impl.wr_cont ?
  91. detail::opcode::cont : impl.wr_opcode;
  92. fh_.mask =
  93. impl.role == role_type::client;
  94. // Choose a write algorithm
  95. if(impl.wr_compress)
  96. {
  97. how_ = do_deflate;
  98. }
  99. else if(! fh_.mask)
  100. {
  101. if(! impl.wr_frag)
  102. {
  103. how_ = do_nomask_nofrag;
  104. }
  105. else
  106. {
  107. BOOST_ASSERT(impl.wr_buf_size != 0);
  108. remain_ = buffer_bytes(cb_);
  109. if(remain_ > impl.wr_buf_size)
  110. how_ = do_nomask_frag;
  111. else
  112. how_ = do_nomask_nofrag;
  113. }
  114. }
  115. else
  116. {
  117. if(! impl.wr_frag)
  118. {
  119. how_ = do_mask_nofrag;
  120. }
  121. else
  122. {
  123. BOOST_ASSERT(impl.wr_buf_size != 0);
  124. remain_ = buffer_bytes(cb_);
  125. if(remain_ > impl.wr_buf_size)
  126. how_ = do_mask_frag;
  127. else
  128. how_ = do_mask_nofrag;
  129. }
  130. }
  131. (*this)({}, 0, false);
  132. }
  133. void operator()(
  134. error_code ec = {},
  135. std::size_t bytes_transferred = 0,
  136. bool cont = true);
  137. };
  138. template<class NextLayer, bool deflateSupported>
  139. template<class Buffers, class Handler>
  140. void
  141. stream<NextLayer, deflateSupported>::
  142. write_some_op<Buffers, Handler>::
  143. operator()(
  144. error_code ec,
  145. std::size_t bytes_transferred,
  146. bool cont)
  147. {
  148. using beast::detail::clamp;
  149. std::size_t n;
  150. net::mutable_buffer b;
  151. auto sp = wp_.lock();
  152. if(! sp)
  153. {
  154. ec = net::error::operation_aborted;
  155. bytes_transferred_ = 0;
  156. return this->complete(cont, ec, bytes_transferred_);
  157. }
  158. auto& impl = *sp;
  159. BOOST_ASIO_CORO_REENTER(*this)
  160. {
  161. // Acquire the write lock
  162. if(! impl.wr_block.try_lock(this))
  163. {
  164. do_suspend:
  165. BOOST_ASIO_CORO_YIELD
  166. impl.op_wr.emplace(std::move(*this));
  167. impl.wr_block.lock(this);
  168. BOOST_ASIO_CORO_YIELD
  169. net::post(std::move(*this));
  170. BOOST_ASSERT(impl.wr_block.is_locked(this));
  171. }
  172. if(impl.check_stop_now(ec))
  173. goto upcall;
  174. //------------------------------------------------------------------
  175. if(how_ == do_nomask_nofrag)
  176. {
  177. // send a single frame
  178. fh_.fin = fin_;
  179. fh_.len = buffer_bytes(cb_);
  180. impl.wr_fb.clear();
  181. detail::write<flat_static_buffer_base>(
  182. impl.wr_fb, fh_);
  183. impl.wr_cont = ! fin_;
  184. BOOST_ASIO_CORO_YIELD
  185. net::async_write(impl.stream(),
  186. buffers_cat(impl.wr_fb.data(), cb_),
  187. beast::detail::bind_continuation(std::move(*this)));
  188. bytes_transferred_ += clamp(fh_.len);
  189. if(impl.check_stop_now(ec))
  190. goto upcall;
  191. goto upcall;
  192. }
  193. //------------------------------------------------------------------
  194. if(how_ == do_nomask_frag)
  195. {
  196. // send multiple frames
  197. for(;;)
  198. {
  199. n = clamp(remain_, impl.wr_buf_size);
  200. fh_.len = n;
  201. remain_ -= n;
  202. fh_.fin = fin_ ? remain_ == 0 : false;
  203. impl.wr_fb.clear();
  204. detail::write<flat_static_buffer_base>(
  205. impl.wr_fb, fh_);
  206. impl.wr_cont = ! fin_;
  207. // Send frame
  208. BOOST_ASIO_CORO_YIELD
  209. net::async_write(impl.stream(), buffers_cat(
  210. impl.wr_fb.data(),
  211. buffers_prefix(clamp(fh_.len), cb_)),
  212. beast::detail::bind_continuation(std::move(*this)));
  213. n = clamp(fh_.len); // restore `n` on yield
  214. bytes_transferred_ += n;
  215. if(impl.check_stop_now(ec))
  216. goto upcall;
  217. if(remain_ == 0)
  218. break;
  219. cb_.consume(n);
  220. fh_.op = detail::opcode::cont;
  221. // Give up the write lock in between each frame
  222. // so that outgoing control frames might be sent.
  223. impl.wr_block.unlock(this);
  224. if( impl.op_close.maybe_invoke()
  225. || impl.op_idle_ping.maybe_invoke()
  226. || impl.op_rd.maybe_invoke()
  227. || impl.op_ping.maybe_invoke())
  228. {
  229. BOOST_ASSERT(impl.wr_block.is_locked());
  230. goto do_suspend;
  231. }
  232. impl.wr_block.lock(this);
  233. }
  234. goto upcall;
  235. }
  236. //------------------------------------------------------------------
  237. if(how_ == do_mask_nofrag)
  238. {
  239. // send a single frame using multiple writes
  240. remain_ = beast::buffer_bytes(cb_);
  241. fh_.fin = fin_;
  242. fh_.len = remain_;
  243. fh_.key = impl.create_mask();
  244. detail::prepare_key(key_, fh_.key);
  245. impl.wr_fb.clear();
  246. detail::write<flat_static_buffer_base>(
  247. impl.wr_fb, fh_);
  248. n = clamp(remain_, impl.wr_buf_size);
  249. net::buffer_copy(net::buffer(
  250. impl.wr_buf.get(), n), cb_);
  251. detail::mask_inplace(net::buffer(
  252. impl.wr_buf.get(), n), key_);
  253. remain_ -= n;
  254. impl.wr_cont = ! fin_;
  255. // write frame header and some payload
  256. BOOST_ASIO_CORO_YIELD
  257. net::async_write(impl.stream(), buffers_cat(
  258. impl.wr_fb.data(),
  259. net::buffer(impl.wr_buf.get(), n)),
  260. beast::detail::bind_continuation(std::move(*this)));
  261. // VFALCO What about consuming the buffer on error?
  262. bytes_transferred_ +=
  263. bytes_transferred - impl.wr_fb.size();
  264. if(impl.check_stop_now(ec))
  265. goto upcall;
  266. while(remain_ > 0)
  267. {
  268. cb_.consume(impl.wr_buf_size);
  269. n = clamp(remain_, impl.wr_buf_size);
  270. net::buffer_copy(net::buffer(
  271. impl.wr_buf.get(), n), cb_);
  272. detail::mask_inplace(net::buffer(
  273. impl.wr_buf.get(), n), key_);
  274. remain_ -= n;
  275. // write more payload
  276. BOOST_ASIO_CORO_YIELD
  277. net::async_write(impl.stream(),
  278. net::buffer(impl.wr_buf.get(), n),
  279. beast::detail::bind_continuation(std::move(*this)));
  280. bytes_transferred_ += bytes_transferred;
  281. if(impl.check_stop_now(ec))
  282. goto upcall;
  283. }
  284. goto upcall;
  285. }
  286. //------------------------------------------------------------------
  287. if(how_ == do_mask_frag)
  288. {
  289. // send multiple frames
  290. for(;;)
  291. {
  292. n = clamp(remain_, impl.wr_buf_size);
  293. remain_ -= n;
  294. fh_.len = n;
  295. fh_.key = impl.create_mask();
  296. fh_.fin = fin_ ? remain_ == 0 : false;
  297. detail::prepare_key(key_, fh_.key);
  298. net::buffer_copy(net::buffer(
  299. impl.wr_buf.get(), n), cb_);
  300. detail::mask_inplace(net::buffer(
  301. impl.wr_buf.get(), n), key_);
  302. impl.wr_fb.clear();
  303. detail::write<flat_static_buffer_base>(
  304. impl.wr_fb, fh_);
  305. impl.wr_cont = ! fin_;
  306. // Send frame
  307. BOOST_ASIO_CORO_YIELD
  308. net::async_write(impl.stream(), buffers_cat(
  309. impl.wr_fb.data(),
  310. net::buffer(impl.wr_buf.get(), n)),
  311. beast::detail::bind_continuation(std::move(*this)));
  312. n = bytes_transferred - impl.wr_fb.size();
  313. bytes_transferred_ += n;
  314. if(impl.check_stop_now(ec))
  315. goto upcall;
  316. if(remain_ == 0)
  317. break;
  318. cb_.consume(n);
  319. fh_.op = detail::opcode::cont;
  320. // Give up the write lock in between each frame
  321. // so that outgoing control frames might be sent.
  322. impl.wr_block.unlock(this);
  323. if( impl.op_close.maybe_invoke()
  324. || impl.op_idle_ping.maybe_invoke()
  325. || impl.op_rd.maybe_invoke()
  326. || impl.op_ping.maybe_invoke())
  327. {
  328. BOOST_ASSERT(impl.wr_block.is_locked());
  329. goto do_suspend;
  330. }
  331. impl.wr_block.lock(this);
  332. }
  333. goto upcall;
  334. }
  335. //------------------------------------------------------------------
  336. if(how_ == do_deflate)
  337. {
  338. // send compressed frames
  339. for(;;)
  340. {
  341. b = net::buffer(impl.wr_buf.get(),
  342. impl.wr_buf_size);
  343. more_ = impl.deflate(b, cb_, fin_, in_, ec);
  344. if(impl.check_stop_now(ec))
  345. goto upcall;
  346. n = buffer_bytes(b);
  347. if(n == 0)
  348. {
  349. // The input was consumed, but there is
  350. // no output due to compression latency.
  351. BOOST_ASSERT(! fin_);
  352. BOOST_ASSERT(buffer_bytes(cb_) == 0);
  353. goto upcall;
  354. }
  355. if(fh_.mask)
  356. {
  357. fh_.key = impl.create_mask();
  358. detail::prepared_key key;
  359. detail::prepare_key(key, fh_.key);
  360. detail::mask_inplace(b, key);
  361. }
  362. fh_.fin = ! more_;
  363. fh_.len = n;
  364. impl.wr_fb.clear();
  365. detail::write<
  366. flat_static_buffer_base>(impl.wr_fb, fh_);
  367. impl.wr_cont = ! fin_;
  368. // Send frame
  369. BOOST_ASIO_CORO_YIELD
  370. net::async_write(impl.stream(), buffers_cat(
  371. impl.wr_fb.data(), b),
  372. beast::detail::bind_continuation(std::move(*this)));
  373. bytes_transferred_ += in_;
  374. if(impl.check_stop_now(ec))
  375. goto upcall;
  376. if(more_)
  377. {
  378. fh_.op = detail::opcode::cont;
  379. fh_.rsv1 = false;
  380. // Give up the write lock in between each frame
  381. // so that outgoing control frames might be sent.
  382. impl.wr_block.unlock(this);
  383. if( impl.op_close.maybe_invoke()
  384. || impl.op_idle_ping.maybe_invoke()
  385. || impl.op_rd.maybe_invoke()
  386. || impl.op_ping.maybe_invoke())
  387. {
  388. BOOST_ASSERT(impl.wr_block.is_locked());
  389. goto do_suspend;
  390. }
  391. impl.wr_block.lock(this);
  392. }
  393. else
  394. {
  395. if(fh_.fin)
  396. impl.do_context_takeover_write(impl.role);
  397. goto upcall;
  398. }
  399. }
  400. }
  401. //--------------------------------------------------------------------------
  402. upcall:
  403. impl.wr_block.unlock(this);
  404. impl.op_close.maybe_invoke()
  405. || impl.op_idle_ping.maybe_invoke()
  406. || impl.op_rd.maybe_invoke()
  407. || impl.op_ping.maybe_invoke();
  408. this->complete(cont, ec, bytes_transferred_);
  409. }
  410. }
  411. template<class NextLayer, bool deflateSupported>
  412. struct stream<NextLayer, deflateSupported>::
  413. run_write_some_op
  414. {
  415. template<
  416. class WriteHandler,
  417. class ConstBufferSequence>
  418. void
  419. operator()(
  420. WriteHandler&& h,
  421. boost::shared_ptr<impl_type> const& sp,
  422. bool fin,
  423. ConstBufferSequence const& b)
  424. {
  425. // If you get an error on the following line it means
  426. // that your handler does not meet the documented type
  427. // requirements for the handler.
  428. static_assert(
  429. beast::detail::is_invocable<WriteHandler,
  430. void(error_code, std::size_t)>::value,
  431. "WriteHandler type requirements not met");
  432. write_some_op<
  433. typename std::decay<WriteHandler>::type,
  434. ConstBufferSequence>(
  435. std::forward<WriteHandler>(h),
  436. sp,
  437. fin,
  438. b);
  439. }
  440. };
  441. //------------------------------------------------------------------------------
  442. template<class NextLayer, bool deflateSupported>
  443. template<class ConstBufferSequence>
  444. std::size_t
  445. stream<NextLayer, deflateSupported>::
  446. write_some(bool fin, ConstBufferSequence const& buffers)
  447. {
  448. static_assert(is_sync_stream<next_layer_type>::value,
  449. "SyncStream type requirements not met");
  450. static_assert(net::is_const_buffer_sequence<
  451. ConstBufferSequence>::value,
  452. "ConstBufferSequence type requirements not met");
  453. error_code ec;
  454. auto const bytes_transferred =
  455. write_some(fin, buffers, ec);
  456. if(ec)
  457. BOOST_THROW_EXCEPTION(system_error{ec});
  458. return bytes_transferred;
  459. }
  460. template<class NextLayer, bool deflateSupported>
  461. template<class ConstBufferSequence>
  462. std::size_t
  463. stream<NextLayer, deflateSupported>::
  464. write_some(bool fin,
  465. ConstBufferSequence const& buffers, error_code& ec)
  466. {
  467. static_assert(is_sync_stream<next_layer_type>::value,
  468. "SyncStream type requirements not met");
  469. static_assert(net::is_const_buffer_sequence<
  470. ConstBufferSequence>::value,
  471. "ConstBufferSequence type requirements not met");
  472. using beast::detail::clamp;
  473. auto& impl = *impl_;
  474. std::size_t bytes_transferred = 0;
  475. ec = {};
  476. if(impl.check_stop_now(ec))
  477. return bytes_transferred;
  478. detail::frame_header fh;
  479. if(! impl.wr_cont)
  480. {
  481. impl.begin_msg();
  482. fh.rsv1 = impl.wr_compress;
  483. }
  484. else
  485. {
  486. fh.rsv1 = false;
  487. }
  488. fh.rsv2 = false;
  489. fh.rsv3 = false;
  490. fh.op = impl.wr_cont ?
  491. detail::opcode::cont : impl.wr_opcode;
  492. fh.mask = impl.role == role_type::client;
  493. auto remain = buffer_bytes(buffers);
  494. if(impl.wr_compress)
  495. {
  496. buffers_suffix<
  497. ConstBufferSequence> cb(buffers);
  498. for(;;)
  499. {
  500. auto b = net::buffer(
  501. impl.wr_buf.get(), impl.wr_buf_size);
  502. auto const more = impl.deflate(
  503. b, cb, fin, bytes_transferred, ec);
  504. if(impl.check_stop_now(ec))
  505. return bytes_transferred;
  506. auto const n = buffer_bytes(b);
  507. if(n == 0)
  508. {
  509. // The input was consumed, but there
  510. // is no output due to compression
  511. // latency.
  512. BOOST_ASSERT(! fin);
  513. BOOST_ASSERT(buffer_bytes(cb) == 0);
  514. fh.fin = false;
  515. break;
  516. }
  517. if(fh.mask)
  518. {
  519. fh.key = this->impl_->create_mask();
  520. detail::prepared_key key;
  521. detail::prepare_key(key, fh.key);
  522. detail::mask_inplace(b, key);
  523. }
  524. fh.fin = ! more;
  525. fh.len = n;
  526. detail::fh_buffer fh_buf;
  527. detail::write<
  528. flat_static_buffer_base>(fh_buf, fh);
  529. impl.wr_cont = ! fin;
  530. net::write(impl.stream(),
  531. buffers_cat(fh_buf.data(), b), ec);
  532. if(impl.check_stop_now(ec))
  533. return bytes_transferred;
  534. if(! more)
  535. break;
  536. fh.op = detail::opcode::cont;
  537. fh.rsv1 = false;
  538. }
  539. if(fh.fin)
  540. impl.do_context_takeover_write(impl.role);
  541. }
  542. else if(! fh.mask)
  543. {
  544. if(! impl.wr_frag)
  545. {
  546. // no mask, no autofrag
  547. fh.fin = fin;
  548. fh.len = remain;
  549. detail::fh_buffer fh_buf;
  550. detail::write<
  551. flat_static_buffer_base>(fh_buf, fh);
  552. impl.wr_cont = ! fin;
  553. net::write(impl.stream(),
  554. buffers_cat(fh_buf.data(), buffers), ec);
  555. if(impl.check_stop_now(ec))
  556. return bytes_transferred;
  557. bytes_transferred += remain;
  558. }
  559. else
  560. {
  561. // no mask, autofrag
  562. BOOST_ASSERT(impl.wr_buf_size != 0);
  563. buffers_suffix<
  564. ConstBufferSequence> cb{buffers};
  565. for(;;)
  566. {
  567. auto const n = clamp(remain, impl.wr_buf_size);
  568. remain -= n;
  569. fh.len = n;
  570. fh.fin = fin ? remain == 0 : false;
  571. detail::fh_buffer fh_buf;
  572. detail::write<
  573. flat_static_buffer_base>(fh_buf, fh);
  574. impl.wr_cont = ! fin;
  575. net::write(impl.stream(),
  576. beast::buffers_cat(fh_buf.data(),
  577. beast::buffers_prefix(n, cb)), ec);
  578. bytes_transferred += n;
  579. if(impl.check_stop_now(ec))
  580. return bytes_transferred;
  581. if(remain == 0)
  582. break;
  583. fh.op = detail::opcode::cont;
  584. cb.consume(n);
  585. }
  586. }
  587. }
  588. else if(! impl.wr_frag)
  589. {
  590. // mask, no autofrag
  591. fh.fin = fin;
  592. fh.len = remain;
  593. fh.key = this->impl_->create_mask();
  594. detail::prepared_key key;
  595. detail::prepare_key(key, fh.key);
  596. detail::fh_buffer fh_buf;
  597. detail::write<
  598. flat_static_buffer_base>(fh_buf, fh);
  599. buffers_suffix<
  600. ConstBufferSequence> cb{buffers};
  601. {
  602. auto const n =
  603. clamp(remain, impl.wr_buf_size);
  604. auto const b =
  605. net::buffer(impl.wr_buf.get(), n);
  606. net::buffer_copy(b, cb);
  607. cb.consume(n);
  608. remain -= n;
  609. detail::mask_inplace(b, key);
  610. impl.wr_cont = ! fin;
  611. net::write(impl.stream(),
  612. buffers_cat(fh_buf.data(), b), ec);
  613. bytes_transferred += n;
  614. if(impl.check_stop_now(ec))
  615. return bytes_transferred;
  616. }
  617. while(remain > 0)
  618. {
  619. auto const n =
  620. clamp(remain, impl.wr_buf_size);
  621. auto const b =
  622. net::buffer(impl.wr_buf.get(), n);
  623. net::buffer_copy(b, cb);
  624. cb.consume(n);
  625. remain -= n;
  626. detail::mask_inplace(b, key);
  627. net::write(impl.stream(), b, ec);
  628. bytes_transferred += n;
  629. if(impl.check_stop_now(ec))
  630. return bytes_transferred;
  631. }
  632. }
  633. else
  634. {
  635. // mask, autofrag
  636. BOOST_ASSERT(impl.wr_buf_size != 0);
  637. buffers_suffix<
  638. ConstBufferSequence> cb(buffers);
  639. for(;;)
  640. {
  641. fh.key = this->impl_->create_mask();
  642. detail::prepared_key key;
  643. detail::prepare_key(key, fh.key);
  644. auto const n =
  645. clamp(remain, impl.wr_buf_size);
  646. auto const b =
  647. net::buffer(impl.wr_buf.get(), n);
  648. net::buffer_copy(b, cb);
  649. detail::mask_inplace(b, key);
  650. fh.len = n;
  651. remain -= n;
  652. fh.fin = fin ? remain == 0 : false;
  653. impl.wr_cont = ! fh.fin;
  654. detail::fh_buffer fh_buf;
  655. detail::write<
  656. flat_static_buffer_base>(fh_buf, fh);
  657. net::write(impl.stream(),
  658. buffers_cat(fh_buf.data(), b), ec);
  659. bytes_transferred += n;
  660. if(impl.check_stop_now(ec))
  661. return bytes_transferred;
  662. if(remain == 0)
  663. break;
  664. fh.op = detail::opcode::cont;
  665. cb.consume(n);
  666. }
  667. }
  668. return bytes_transferred;
  669. }
  670. template<class NextLayer, bool deflateSupported>
  671. template<class ConstBufferSequence, class WriteHandler>
  672. BOOST_BEAST_ASYNC_RESULT2(WriteHandler)
  673. stream<NextLayer, deflateSupported>::
  674. async_write_some(bool fin,
  675. ConstBufferSequence const& bs, WriteHandler&& handler)
  676. {
  677. static_assert(is_async_stream<next_layer_type>::value,
  678. "AsyncStream type requirements not met");
  679. static_assert(net::is_const_buffer_sequence<
  680. ConstBufferSequence>::value,
  681. "ConstBufferSequence type requirements not met");
  682. return net::async_initiate<
  683. WriteHandler,
  684. void(error_code, std::size_t)>(
  685. run_write_some_op{},
  686. handler,
  687. impl_,
  688. fin,
  689. bs);
  690. }
  691. //------------------------------------------------------------------------------
  692. template<class NextLayer, bool deflateSupported>
  693. template<class ConstBufferSequence>
  694. std::size_t
  695. stream<NextLayer, deflateSupported>::
  696. write(ConstBufferSequence const& buffers)
  697. {
  698. static_assert(is_sync_stream<next_layer_type>::value,
  699. "SyncStream type requirements not met");
  700. static_assert(net::is_const_buffer_sequence<
  701. ConstBufferSequence>::value,
  702. "ConstBufferSequence type requirements not met");
  703. error_code ec;
  704. auto const bytes_transferred = write(buffers, ec);
  705. if(ec)
  706. BOOST_THROW_EXCEPTION(system_error{ec});
  707. return bytes_transferred;
  708. }
  709. template<class NextLayer, bool deflateSupported>
  710. template<class ConstBufferSequence>
  711. std::size_t
  712. stream<NextLayer, deflateSupported>::
  713. write(ConstBufferSequence const& buffers, error_code& ec)
  714. {
  715. static_assert(is_sync_stream<next_layer_type>::value,
  716. "SyncStream type requirements not met");
  717. static_assert(net::is_const_buffer_sequence<
  718. ConstBufferSequence>::value,
  719. "ConstBufferSequence type requirements not met");
  720. return write_some(true, buffers, ec);
  721. }
  722. template<class NextLayer, bool deflateSupported>
  723. template<class ConstBufferSequence, class WriteHandler>
  724. BOOST_BEAST_ASYNC_RESULT2(WriteHandler)
  725. stream<NextLayer, deflateSupported>::
  726. async_write(
  727. ConstBufferSequence const& bs, WriteHandler&& handler)
  728. {
  729. static_assert(is_async_stream<next_layer_type>::value,
  730. "AsyncStream type requirements not met");
  731. static_assert(net::is_const_buffer_sequence<
  732. ConstBufferSequence>::value,
  733. "ConstBufferSequence type requirements not met");
  734. return net::async_initiate<
  735. WriteHandler,
  736. void(error_code, std::size_t)>(
  737. run_write_some_op{},
  738. handler,
  739. impl_,
  740. true,
  741. bs);
  742. }
  743. } // websocket
  744. } // beast
  745. } // boost
  746. #endif