concurrent_table.hpp

  1. /* Fast open-addressing concurrent hash table.
  2. *
  3. * Copyright 2023 Joaquin M Lopez Munoz.
  4. * Distributed under the Boost Software License, Version 1.0.
  5. * (See accompanying file LICENSE_1_0.txt or copy at
  6. * http://www.boost.org/LICENSE_1_0.txt)
  7. *
  8. * See https://www.boost.org/libs/unordered for library home page.
  9. */
  10. #ifndef BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
  11. #define BOOST_UNORDERED_DETAIL_FOA_CONCURRENT_TABLE_HPP
  12. #include <atomic>
  13. #include <boost/assert.hpp>
  14. #include <boost/config.hpp>
  15. #include <boost/core/ignore_unused.hpp>
  16. #include <boost/core/no_exceptions_support.hpp>
  17. #include <boost/core/serialization.hpp>
  18. #include <boost/cstdint.hpp>
  19. #include <boost/mp11/tuple.hpp>
  20. #include <boost/throw_exception.hpp>
  21. #include <boost/unordered/detail/archive_constructed.hpp>
  22. #include <boost/unordered/detail/bad_archive_exception.hpp>
  23. #include <boost/unordered/detail/foa/core.hpp>
  24. #include <boost/unordered/detail/foa/reentrancy_check.hpp>
  25. #include <boost/unordered/detail/foa/rw_spinlock.hpp>
  26. #include <boost/unordered/detail/foa/tuple_rotate_right.hpp>
  27. #include <boost/unordered/detail/serialization_version.hpp>
  28. #include <boost/unordered/detail/static_assert.hpp>
  29. #include <cstddef>
  30. #include <functional>
  31. #include <initializer_list>
  32. #include <iterator>
  33. #include <memory>
  34. #include <new>
  35. #include <type_traits>
  36. #include <tuple>
  37. #include <utility>
  38. #if !defined(BOOST_UNORDERED_DISABLE_PARALLEL_ALGORITHMS)
  39. #if defined(BOOST_UNORDERED_ENABLE_PARALLEL_ALGORITHMS)|| \
  40. !defined(BOOST_NO_CXX17_HDR_EXECUTION)
  41. #define BOOST_UNORDERED_PARALLEL_ALGORITHMS
  42. #endif
  43. #endif
  44. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  45. #include <algorithm>
  46. #include <execution>
  47. #endif
  48. namespace boost{
  49. namespace unordered{
  50. namespace detail{
  51. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  52. template<typename ExecutionPolicy>
  53. using is_execution_policy=std::is_execution_policy<
  54. typename std::remove_cv<
  55. typename std::remove_reference<ExecutionPolicy>::type
  56. >::type
  57. >;
  58. #else
  59. template<typename ExecutionPolicy>
  60. using is_execution_policy=std::false_type;
  61. #endif
  62. namespace foa{
  63. static constexpr std::size_t cacheline_size=64;
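/* Array of N elements of T in which each element is padded up to a multiple
 * of cacheline_size and the storage is aligned to a cache line boundary, so
 * that adjacent elements (e.g. the spinlocks held by multimutex below) don't
 * share cache lines and cause false sharing.
 */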
  64. template<typename T,std::size_t N>
  65. class cache_aligned_array
  66. {
  67. public:
  68. cache_aligned_array(){for(std::size_t n=0;n<N;)::new (data(n++)) T();}
  69. ~cache_aligned_array(){for(auto n=N;n>0;)data(--n)->~T();}
  70. cache_aligned_array(const cache_aligned_array&)=delete;
  71. cache_aligned_array& operator=(const cache_aligned_array&)=delete;
  72. T& operator[](std::size_t pos)noexcept{return *data(pos);}
  73. private:
  74. static constexpr std::size_t element_offset=
  75. (sizeof(T)+cacheline_size-1)/cacheline_size*cacheline_size;
  76. BOOST_UNORDERED_STATIC_ASSERT(alignof(T)<=cacheline_size);
  77. T* data(std::size_t pos)noexcept
  78. {
  79. return reinterpret_cast<T*>(
  80. (reinterpret_cast<uintptr_t>(&buf)+cacheline_size-1)/
  81. cacheline_size*cacheline_size
  82. +pos*element_offset);
  83. }
  84. unsigned char buf[element_offset*N+cacheline_size-1];
  85. };
  86. template<typename Mutex,std::size_t N>
  87. class multimutex
  88. {
  89. public:
  90. constexpr std::size_t size()const noexcept{return N;}
  91. Mutex& operator[](std::size_t pos)noexcept
  92. {
  93. BOOST_ASSERT(pos<N);
  94. return mutexes[pos];
  95. }
  96. void lock()noexcept{for(std::size_t n=0;n<N;)mutexes[n++].lock();}
  97. void unlock()noexcept{for(auto n=N;n>0;)mutexes[--n].unlock();}
  98. private:
  99. cache_aligned_array<Mutex,N> mutexes;
  100. };
  101. /* minimal substitute for std::shared_lock, which is C++14 */
  102. template<typename Mutex>
  103. class shared_lock
  104. {
  105. public:
  106. shared_lock(Mutex& m_)noexcept:m(m_){m.lock_shared();}
  107. ~shared_lock()noexcept{if(owns)m.unlock_shared();}
  108. /* not used but VS in pre-C++17 mode needs to see it for RVO */
  109. shared_lock(const shared_lock&);
  110. void lock(){BOOST_ASSERT(!owns);m.lock_shared();owns=true;}
  111. void unlock(){BOOST_ASSERT(owns);m.unlock_shared();owns=false;}
  112. private:
  113. Mutex &m;
  114. bool owns=true;
  115. };
  116. /* VS in pre-C++17 mode can't implement RVO for std::lock_guard due to
  117. * its copy constructor being deleted.
  118. */
  119. template<typename Mutex>
  120. class lock_guard
  121. {
  122. public:
  123. lock_guard(Mutex& m_)noexcept:m(m_){m.lock();}
  124. ~lock_guard()noexcept{m.unlock();}
  125. /* not used but VS in pre-C++17 mode needs to see it for RVO */
  126. lock_guard(const lock_guard&);
  127. private:
  128. Mutex &m;
  129. };
  130. /* inspired by boost/multi_index/detail/scoped_bilock.hpp */
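/* Locks the two mutexes in a consistent, address-based order so that
 * concurrent scoped_bilocks on the same pair cannot deadlock, and collapses
 * to a single lock when both references denote the same mutex.
 */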
  131. template<typename Mutex>
  132. class scoped_bilock
  133. {
  134. public:
  135. scoped_bilock(Mutex& m1,Mutex& m2)noexcept
  136. {
  137. bool mutex_lt=std::less<Mutex*>{}(&m1,&m2);
  138. pm1=mutex_lt?&m1:&m2;
  139. pm1->lock();
  140. if(&m1==&m2){
  141. pm2=nullptr;
  142. }
  143. else{
  144. pm2=mutex_lt?&m2:&m1;
  145. pm2->lock();
  146. }
  147. }
  148. /* not used but VS in pre-C++17 mode needs to see it for RVO */
  149. scoped_bilock(const scoped_bilock&);
  150. ~scoped_bilock()noexcept
  151. {
  152. if(pm2)pm2->unlock();
  153. pm1->unlock();
  154. }
  155. private:
  156. Mutex *pm1,*pm2;
  157. };
  158. /* use atomics for group metadata storage */
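/* Wraps the integral words used by group15 for its metadata in relaxed
 * std::atomic operations, so that the lock-free portion of lookup can read
 * group metadata concurrently with writers updating it.
 */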
  159. template<typename Integral>
  160. struct atomic_integral
  161. {
  162. operator Integral()const{return n.load(std::memory_order_relaxed);}
  163. void operator=(Integral m){n.store(m,std::memory_order_relaxed);}
  164. void operator|=(Integral m){n.fetch_or(m,std::memory_order_relaxed);}
  165. void operator&=(Integral m){n.fetch_and(m,std::memory_order_relaxed);}
  166. atomic_integral& operator=(atomic_integral const& rhs) {
  167. n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed);
  168. return *this;
  169. }
  170. std::atomic<Integral> n;
  171. };
  172. /* Group-level concurrency protection. It provides a rw mutex plus an
  173. * atomic insertion counter for optimistic insertion (see
  174. * unprotected_norehash_emplace_or_visit).
  175. */
  176. struct group_access
  177. {
  178. using mutex_type=rw_spinlock;
  179. using shared_lock_guard=shared_lock<mutex_type>;
  180. using exclusive_lock_guard=lock_guard<mutex_type>;
  181. using insert_counter_type=std::atomic<boost::uint32_t>;
  182. shared_lock_guard shared_access(){return shared_lock_guard{m};}
  183. exclusive_lock_guard exclusive_access(){return exclusive_lock_guard{m};}
  184. insert_counter_type& insert_counter(){return cnt;}
  185. private:
  186. mutex_type m;
  187. insert_counter_type cnt{0};
  188. };
  189. template<std::size_t Size>
  190. group_access* dummy_group_accesses()
  191. {
  192. /* Default group_access array to provide to empty containers without
  193. * incurring dynamic allocation. The mutexes won't ever actually be used
  194. * (there can be no successful reduced hash match), and the insertion
  195. * counters won't ever be incremented (insertions can't succeed as capacity()==0).
  196. */
  197. static group_access accesses[Size];
  198. return accesses;
  199. }
  200. /* subclasses table_arrays to add an additional group_access array */
  201. template<typename Value,typename Group,typename SizePolicy,typename Allocator>
  202. struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy,Allocator>
  203. {
  204. using group_access_allocator_type=
  205. typename boost::allocator_rebind<Allocator,group_access>::type;
  206. using group_access_pointer=
  207. typename boost::allocator_pointer<group_access_allocator_type>::type;
  208. using super=table_arrays<Value,Group,SizePolicy,Allocator>;
  209. concurrent_table_arrays(const super& arrays,group_access_pointer pga):
  210. super{arrays},group_accesses_{pga}{}
  211. group_access* group_accesses()const noexcept{
  212. return boost::to_address(group_accesses_);
  213. }
  214. static concurrent_table_arrays new_(
  215. group_access_allocator_type al,std::size_t n)
  216. {
  217. super x{super::new_(al,n)};
  218. BOOST_TRY{
  219. return new_group_access(al,x);
  220. }
  221. BOOST_CATCH(...){
  222. super::delete_(al,x);
  223. BOOST_RETHROW
  224. }
  225. BOOST_CATCH_END
  226. }
  227. static void set_group_access(
  228. group_access_allocator_type al,concurrent_table_arrays& arrays)
  229. {
  230. set_group_access(
  231. al,arrays,std::is_same<group_access*,group_access_pointer>{});
  232. }
  233. static void set_group_access(
  234. group_access_allocator_type al,
  235. concurrent_table_arrays& arrays,
  236. std::false_type /* fancy pointers */)
  237. {
  238. arrays.group_accesses_=
  239. boost::allocator_allocate(al,arrays.groups_size_mask+1);
  240. for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
  241. ::new (arrays.group_accesses()+i) group_access();
  242. }
  243. }
  244. static void set_group_access(
  245. group_access_allocator_type al,
  246. concurrent_table_arrays& arrays,
  247. std::true_type /* optimize when elements() is null */)
  248. {
  249. if(!arrays.elements()){
  250. arrays.group_accesses_=
  251. dummy_group_accesses<SizePolicy::min_size()>();
  252. } else {
  253. set_group_access(al,arrays,std::false_type{});
  254. }
  255. }
  256. static concurrent_table_arrays new_group_access(
  257. group_access_allocator_type al,const super& x)
  258. {
  259. concurrent_table_arrays arrays{x,nullptr};
  260. set_group_access(al,arrays);
  261. return arrays;
  262. }
  263. static void delete_(
  264. group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept
  265. {
  266. delete_group_access(al,arrays);
  267. super::delete_(al,arrays);
  268. }
  269. static void delete_group_access(
  270. group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept
  271. {
  272. if(arrays.elements()){
  273. boost::allocator_deallocate(
  274. al,arrays.group_accesses_,arrays.groups_size_mask+1);
  275. }
  276. }
  277. group_access_pointer group_accesses_;
  278. };
  279. struct atomic_size_control
  280. {
  281. static constexpr auto atomic_size_t_size=sizeof(std::atomic<std::size_t>);
  282. BOOST_UNORDERED_STATIC_ASSERT(atomic_size_t_size<cacheline_size);
  283. atomic_size_control(std::size_t ml_,std::size_t size_):
  284. pad0_{},ml{ml_},pad1_{},size{size_}{}
  285. atomic_size_control(const atomic_size_control& x):
  286. pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}
  288. /* padding to avoid false sharing internally and with surrounding data */
  288. unsigned char pad0_[cacheline_size-atomic_size_t_size];
  289. std::atomic<std::size_t> ml;
  290. unsigned char pad1_[cacheline_size-atomic_size_t_size];
  291. std::atomic<std::size_t> size;
  292. };
  293. /* std::swap can't be used on non-assignable atomics */
  294. inline void
  295. swap_atomic_size_t(std::atomic<std::size_t>& x,std::atomic<std::size_t>& y)
  296. {
  297. std::size_t tmp=x;
  298. x=static_cast<std::size_t>(y);
  299. y=tmp;
  300. }
  301. inline void swap(atomic_size_control& x,atomic_size_control& y)
  302. {
  303. swap_atomic_size_t(x.ml,y.ml);
  304. swap_atomic_size_t(x.size,y.size);
  305. }
  306. /* foa::concurrent_table serves as the foundation for end-user concurrent
  307. * hash containers.
  308. *
  309. * The exposed interface (completed by the wrapping containers) is not that
  310. * of a regular container (in fact, it does not model Container as understood
  311. * by the C++ standard):
  312. *
  313. * - Iterators are not provided as they are not suitable for concurrent
  314. * scenarios.
  315. * - As a consequence, composite operations with regular containers
  316. * (like, for instance, looking up an element and modifying it), must
  317. * be provided natively without any intervening iterator/accessor.
  318. * Visitation is a core concept in this design, either on its own (eg.
  319. * visit(k) locates the element with key k *and* accesses it) or as part
  320. * of a native composite operation (eg. try_emplace_or_visit). Visitation
  321. * is constant or mutating depending on whether the used table function is
  322. * const or not.
  323. * - The API provides member functions for all the meaningful composite
  324. * operations of the form "X (and|or) Y", where X, Y are one of the
  325. * primitives FIND, ACCESS, INSERT or ERASE.
  326. * - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based
  327. * on C++17 stdlib parallel algorithms.
  328. *
  329. * Consult boost::concurrent_flat_(map|set) docs for the full API reference.
  330. * Heterogeneous lookup is supported by default, that is, without checking for
  331. * any ::is_transparent typedefs --this checking is done by the wrapping
  332. * containers.
  333. *
  334. * Thread-safe concurrency is implemented using a two-level lock system:
  335. *
  336. * - A first container-level lock is implemented with an array of
  337. * rw spinlocks acting as a single rw mutex with very little
  338. * cache-coherence traffic on read (each thread is assigned a different
  339. * spinlock in the array). Container-level write locking is only used for
  340. * rehashing and other container-wide operations (assignment, swap, etc.)
  341. * - Each group of slots has an associated rw spinlock. A thread holds
  342. * at most one group lock at any given time. Lookup is implemented in
  343. * a (groupwise) lock-free manner until a reduced hash match is found, in
  344. * which case the relevant group is locked and the slot is double-checked
  345. * for occupancy and compared with the key.
  346. * - Each group also has an associated so-called insertion counter used for
  347. * the following optimistic insertion algorithm:
  348. * - The value of the insertion counter for the initial group in the probe
  349. * sequence is locally recorded (let's call this value c0).
  350. * - Lookup is as described above. If lookup finds no equivalent element,
  351. * the search for an available slot for insertion successively locks/unlocks
  352. * each group in the probing sequence.
  353. * - When an available slot is located, it is preemptively occupied (its
  354. * reduced hash value is set) and the insertion counter is atomically
  355. * incremented: if no other thread has incremented the counter during the
  356. * whole operation (which is checked by comparing with c0), then we're
  357. * good to go and complete the insertion, otherwise we roll back and
  358. * start over.
  359. */
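/* Illustrative sketch (not part of this header) of how the visitation-style
 * API described above surfaces in a wrapping container such as
 * boost::concurrent_flat_map. Signatures are abridged; see the
 * boost::concurrent_flat_(map|set) documentation for the authoritative
 * interface.
 *
 *   #include <boost/unordered/concurrent_flat_map.hpp>
 *   #include <string>
 *
 *   using map=boost::concurrent_flat_map<std::string,int>;
 *
 *   void sketch()
 *   {
 *     map m;
 *     m.emplace("a",1);                       // INSERT
 *     m.visit("a",[](map::value_type& x){     // FIND and (mutating) ACCESS
 *       ++x.second;
 *     });
 *     m.insert_or_visit(                      // INSERT, or ACCESS if present
 *       map::value_type{"a",10},
 *       [](map::value_type& x){x.second+=10;});
 *     m.cvisit_all([](const map::value_type& x){(void)x;}); // whole table
 *     m.erase_if([](const map::value_type& x){return x.second>100;});
 *   }
 */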
  360. template<typename,typename,typename,typename>
  361. class table; /* concurrent/non-concurrent interop */
  362. template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
  363. using concurrent_table_core_impl=table_core<
  364. TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
  365. atomic_size_control,Hash,Pred,Allocator>;
  366. #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
  367. #if defined(BOOST_MSVC)
  368. #pragma warning(push)
  369. #pragma warning(disable:4714) /* marked as __forceinline not inlined */
  370. #endif
  371. template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
  372. class concurrent_table:
  373. concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>
  374. {
  375. using super=concurrent_table_core_impl<TypePolicy,Hash,Pred,Allocator>;
  376. using type_policy=typename super::type_policy;
  377. using group_type=typename super::group_type;
  378. using super::N;
  379. using prober=typename super::prober;
  380. using arrays_type=typename super::arrays_type;
  381. using size_ctrl_type=typename super::size_ctrl_type;
  382. using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
  383. friend compatible_nonconcurrent_table;
  384. public:
  385. using key_type=typename super::key_type;
  386. using init_type=typename super::init_type;
  387. using value_type=typename super::value_type;
  388. using element_type=typename super::element_type;
  389. using hasher=typename super::hasher;
  390. using key_equal=typename super::key_equal;
  391. using allocator_type=typename super::allocator_type;
  392. using size_type=typename super::size_type;
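/* Bulk visitation (visit(first,last,f)) processes keys in chunks of up to
 * bulk_visit_size so that hashing and prefetching of the target groups can be
 * pipelined across several lookups before any group is actually probed
 * (see unprotected_bulk_visit).
 */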
  393. static constexpr std::size_t bulk_visit_size=16;
  394. private:
  395. template<typename Value,typename T>
  396. using enable_if_is_value_type=typename std::enable_if<
  397. !std::is_same<init_type,value_type>::value&&
  398. std::is_same<Value,value_type>::value,
  399. T
  400. >::type;
  401. public:
  402. concurrent_table(
  403. std::size_t n=default_bucket_count,const Hash& h_=Hash(),
  404. const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
  405. super{n,h_,pred_,al_}
  406. {}
  407. concurrent_table(const concurrent_table& x):
  408. concurrent_table(x,x.exclusive_access()){}
  409. concurrent_table(concurrent_table&& x):
  410. concurrent_table(std::move(x),x.exclusive_access()){}
  411. concurrent_table(const concurrent_table& x,const Allocator& al_):
  412. concurrent_table(x,al_,x.exclusive_access()){}
  413. concurrent_table(concurrent_table&& x,const Allocator& al_):
  414. concurrent_table(std::move(x),al_,x.exclusive_access()){}
  415. template<typename ArraysType>
  416. concurrent_table(
  417. compatible_nonconcurrent_table&& x,
  418. arrays_holder<ArraysType,Allocator>&& ah):
  419. super{
  420. std::move(x.h()),std::move(x.pred()),std::move(x.al()),
  421. [&x]{return arrays_type::new_group_access(
  422. x.al(),typename arrays_type::super{
  423. x.arrays.groups_size_index,x.arrays.groups_size_mask,
  424. to_pointer<typename arrays_type::group_type_pointer>(
  425. reinterpret_cast<group_type*>(x.arrays.groups())),
  426. x.arrays.elements_});},
  427. size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
  428. {
  429. x.arrays=ah.release();
  430. x.size_ctrl.ml=x.initial_max_load();
  431. x.size_ctrl.size=0;
  432. }
  433. concurrent_table(compatible_nonconcurrent_table&& x):
  434. concurrent_table(std::move(x),x.make_empty_arrays())
  435. {}
  436. ~concurrent_table()=default;
  437. concurrent_table& operator=(const concurrent_table& x)
  438. {
  439. auto lck=exclusive_access(*this,x);
  440. super::operator=(x);
  441. return *this;
  442. }
  443. concurrent_table& operator=(concurrent_table&& x)noexcept(
  444. noexcept(std::declval<super&>() = std::declval<super&&>()))
  445. {
  446. auto lck=exclusive_access(*this,x);
  447. super::operator=(std::move(x));
  448. return *this;
  449. }
  450. concurrent_table& operator=(std::initializer_list<value_type> il) {
  451. auto lck=exclusive_access();
  452. super::clear();
  453. super::noshrink_reserve(il.size());
  454. for (auto const& v : il) {
  455. this->unprotected_emplace(v);
  456. }
  457. return *this;
  458. }
  459. allocator_type get_allocator()const noexcept
  460. {
  461. auto lck=shared_access();
  462. return super::get_allocator();
  463. }
  464. template<typename Key,typename F>
  465. BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)
  466. {
  467. return visit_impl(group_exclusive{},x,std::forward<F>(f));
  468. }
  469. template<typename Key,typename F>
  470. BOOST_FORCEINLINE std::size_t visit(const Key& x,F&& f)const
  471. {
  472. return visit_impl(group_shared{},x,std::forward<F>(f));
  473. }
  474. template<typename Key,typename F>
  475. BOOST_FORCEINLINE std::size_t cvisit(const Key& x,F&& f)const
  476. {
  477. return visit(x,std::forward<F>(f));
  478. }
  479. template<typename FwdIterator,typename F>
  480. BOOST_FORCEINLINE
  481. std::size_t visit(FwdIterator first,FwdIterator last,F&& f)
  482. {
  483. return bulk_visit_impl(group_exclusive{},first,last,std::forward<F>(f));
  484. }
  485. template<typename FwdIterator,typename F>
  486. BOOST_FORCEINLINE
  487. std::size_t visit(FwdIterator first,FwdIterator last,F&& f)const
  488. {
  489. return bulk_visit_impl(group_shared{},first,last,std::forward<F>(f));
  490. }
  491. template<typename FwdIterator,typename F>
  492. BOOST_FORCEINLINE
  493. std::size_t cvisit(FwdIterator first,FwdIterator last,F&& f)const
  494. {
  495. return visit(first,last,std::forward<F>(f));
  496. }
  497. template<typename F> std::size_t visit_all(F&& f)
  498. {
  499. return visit_all_impl(group_exclusive{},std::forward<F>(f));
  500. }
  501. template<typename F> std::size_t visit_all(F&& f)const
  502. {
  503. return visit_all_impl(group_shared{},std::forward<F>(f));
  504. }
  505. template<typename F> std::size_t cvisit_all(F&& f)const
  506. {
  507. return visit_all(std::forward<F>(f));
  508. }
  509. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  510. template<typename ExecutionPolicy,typename F>
  511. void visit_all(ExecutionPolicy&& policy,F&& f)
  512. {
  513. visit_all_impl(
  514. group_exclusive{},
  515. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  516. }
  517. template<typename ExecutionPolicy,typename F>
  518. void visit_all(ExecutionPolicy&& policy,F&& f)const
  519. {
  520. visit_all_impl(
  521. group_shared{},
  522. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  523. }
  524. template<typename ExecutionPolicy,typename F>
  525. void cvisit_all(ExecutionPolicy&& policy,F&& f)const
  526. {
  527. visit_all(std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  528. }
  529. #endif
  530. template<typename F> bool visit_while(F&& f)
  531. {
  532. return visit_while_impl(group_exclusive{},std::forward<F>(f));
  533. }
  534. template<typename F> bool visit_while(F&& f)const
  535. {
  536. return visit_while_impl(group_shared{},std::forward<F>(f));
  537. }
  538. template<typename F> bool cvisit_while(F&& f)const
  539. {
  540. return visit_while(std::forward<F>(f));
  541. }
  542. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  543. template<typename ExecutionPolicy,typename F>
  544. bool visit_while(ExecutionPolicy&& policy,F&& f)
  545. {
  546. return visit_while_impl(
  547. group_exclusive{},
  548. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  549. }
  550. template<typename ExecutionPolicy,typename F>
  551. bool visit_while(ExecutionPolicy&& policy,F&& f)const
  552. {
  553. return visit_while_impl(
  554. group_shared{},
  555. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  556. }
  557. template<typename ExecutionPolicy,typename F>
  558. bool cvisit_while(ExecutionPolicy&& policy,F&& f)const
  559. {
  560. return visit_while(
  561. std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
  562. }
  563. #endif
  564. bool empty()const noexcept{return size()==0;}
  565. std::size_t size()const noexcept
  566. {
  567. auto lck=shared_access();
  568. return unprotected_size();
  569. }
  570. using super::max_size;
  571. template<typename... Args>
  572. BOOST_FORCEINLINE bool emplace(Args&&... args)
  573. {
  574. return construct_and_emplace(std::forward<Args>(args)...);
  575. }
  576. BOOST_FORCEINLINE bool
  577. insert(const init_type& x){return emplace_impl(x);}
  578. BOOST_FORCEINLINE bool
  579. insert(init_type&& x){return emplace_impl(std::move(x));}
  580. /* template<typename=void> tilts call ambiguities in favor of init_type */
  581. template<typename=void>
  582. BOOST_FORCEINLINE bool
  583. insert(const value_type& x){return emplace_impl(x);}
  584. template<typename=void>
  585. BOOST_FORCEINLINE bool
  586. insert(value_type&& x){return emplace_impl(std::move(x));}
  587. template<typename Key,typename... Args>
  588. BOOST_FORCEINLINE bool try_emplace(Key&& x,Args&&... args)
  589. {
  590. return emplace_impl(
  591. try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  592. }
  593. template<typename Key,typename... Args>
  594. BOOST_FORCEINLINE bool try_emplace_or_visit(Key&& x,Args&&... args)
  595. {
  596. return emplace_or_visit_flast(
  597. group_exclusive{},
  598. try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  599. }
  600. template<typename Key,typename... Args>
  601. BOOST_FORCEINLINE bool try_emplace_or_cvisit(Key&& x,Args&&... args)
  602. {
  603. return emplace_or_visit_flast(
  604. group_shared{},
  605. try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  606. }
  607. template<typename... Args>
  608. BOOST_FORCEINLINE bool emplace_or_visit(Args&&... args)
  609. {
  610. return construct_and_emplace_or_visit_flast(
  611. group_exclusive{},std::forward<Args>(args)...);
  612. }
  613. template<typename... Args>
  614. BOOST_FORCEINLINE bool emplace_or_cvisit(Args&&... args)
  615. {
  616. return construct_and_emplace_or_visit_flast(
  617. group_shared{},std::forward<Args>(args)...);
  618. }
  619. template<typename F>
  620. BOOST_FORCEINLINE bool insert_or_visit(const init_type& x,F&& f)
  621. {
  622. return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
  623. }
  624. template<typename F>
  625. BOOST_FORCEINLINE bool insert_or_cvisit(const init_type& x,F&& f)
  626. {
  627. return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
  628. }
  629. template<typename F>
  630. BOOST_FORCEINLINE bool insert_or_visit(init_type&& x,F&& f)
  631. {
  632. return emplace_or_visit_impl(
  633. group_exclusive{},std::forward<F>(f),std::move(x));
  634. }
  635. template<typename F>
  636. BOOST_FORCEINLINE bool insert_or_cvisit(init_type&& x,F&& f)
  637. {
  638. return emplace_or_visit_impl(
  639. group_shared{},std::forward<F>(f),std::move(x));
  640. }
  641. /* SFINAE tilts call ambiguities in favor of init_type */
  642. template<typename Value,typename F>
  643. BOOST_FORCEINLINE auto insert_or_visit(const Value& x,F&& f)
  644. ->enable_if_is_value_type<Value,bool>
  645. {
  646. return emplace_or_visit_impl(group_exclusive{},std::forward<F>(f),x);
  647. }
  648. template<typename Value,typename F>
  649. BOOST_FORCEINLINE auto insert_or_cvisit(const Value& x,F&& f)
  650. ->enable_if_is_value_type<Value,bool>
  651. {
  652. return emplace_or_visit_impl(group_shared{},std::forward<F>(f),x);
  653. }
  654. template<typename Value,typename F>
  655. BOOST_FORCEINLINE auto insert_or_visit(Value&& x,F&& f)
  656. ->enable_if_is_value_type<Value,bool>
  657. {
  658. return emplace_or_visit_impl(
  659. group_exclusive{},std::forward<F>(f),std::move(x));
  660. }
  661. template<typename Value,typename F>
  662. BOOST_FORCEINLINE auto insert_or_cvisit(Value&& x,F&& f)
  663. ->enable_if_is_value_type<Value,bool>
  664. {
  665. return emplace_or_visit_impl(
  666. group_shared{},std::forward<F>(f),std::move(x));
  667. }
  668. template<typename Key>
  669. BOOST_FORCEINLINE std::size_t erase(const Key& x)
  670. {
  671. return erase_if(x,[](const value_type&){return true;});
  672. }
  673. template<typename Key,typename F>
  674. BOOST_FORCEINLINE auto erase_if(const Key& x,F&& f)->typename std::enable_if<
  675. !is_execution_policy<Key>::value,std::size_t>::type
  676. {
  677. auto lck=shared_access();
  678. auto hash=this->hash_for(x);
  679. std::size_t res=0;
  680. unprotected_internal_visit(
  681. group_exclusive{},x,this->position_for(hash),hash,
  682. [&,this](group_type* pg,unsigned int n,element_type* p)
  683. {
  684. if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
  685. super::erase(pg,n,p);
  686. res=1;
  687. }
  688. });
  689. return res;
  690. }
  691. template<typename F>
  692. std::size_t erase_if(F&& f)
  693. {
  694. auto lck=shared_access();
  695. std::size_t res=0;
  696. for_all_elements(
  697. group_exclusive{},
  698. [&,this](group_type* pg,unsigned int n,element_type* p){
  699. if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
  700. super::erase(pg,n,p);
  701. ++res;
  702. }
  703. });
  704. return res;
  705. }
  706. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  707. template<typename ExecutionPolicy,typename F>
  708. auto erase_if(ExecutionPolicy&& policy,F&& f)->typename std::enable_if<
  709. is_execution_policy<ExecutionPolicy>::value,void>::type
  710. {
  711. auto lck=shared_access();
  712. for_all_elements(
  713. group_exclusive{},std::forward<ExecutionPolicy>(policy),
  714. [&,this](group_type* pg,unsigned int n,element_type* p){
  715. if(f(cast_for(group_exclusive{},type_policy::value_from(*p)))){
  716. super::erase(pg,n,p);
  717. }
  718. });
  719. }
  720. #endif
  721. void swap(concurrent_table& x)
  722. noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
  723. {
  724. auto lck=exclusive_access(*this,x);
  725. super::swap(x);
  726. }
  727. void clear()noexcept
  728. {
  729. auto lck=exclusive_access();
  730. super::clear();
  731. }
  732. // TODO: should we accept a different allocator too?
  733. template<typename Hash2,typename Pred2>
  734. size_type merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& x)
  735. {
  736. using merge_table_type=concurrent_table<TypePolicy,Hash2,Pred2,Allocator>;
  737. using super2=typename merge_table_type::super;
  738. // for clang
  739. boost::ignore_unused<super2>();
  740. auto lck=exclusive_access(*this,x);
  741. size_type s=super::size();
  742. x.super2::for_all_elements( /* super2::for_all_elements -> unprotected */
  743. [&,this](group_type* pg,unsigned int n,element_type* p){
  744. typename merge_table_type::erase_on_exit e{x,pg,n,p};
  745. if(!unprotected_emplace(type_policy::move(*p)))e.rollback();
  746. });
  747. return size_type{super::size()-s};
  748. }
  749. template<typename Hash2,typename Pred2>
  750. void merge(concurrent_table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}
  751. hasher hash_function()const
  752. {
  753. auto lck=shared_access();
  754. return super::hash_function();
  755. }
  756. key_equal key_eq()const
  757. {
  758. auto lck=shared_access();
  759. return super::key_eq();
  760. }
  761. template<typename Key>
  762. BOOST_FORCEINLINE std::size_t count(Key&& x)const
  763. {
  764. return (std::size_t)contains(std::forward<Key>(x));
  765. }
  766. template<typename Key>
  767. BOOST_FORCEINLINE bool contains(Key&& x)const
  768. {
  769. return visit(std::forward<Key>(x),[](const value_type&){})!=0;
  770. }
  771. std::size_t capacity()const noexcept
  772. {
  773. auto lck=shared_access();
  774. return super::capacity();
  775. }
  776. float load_factor()const noexcept
  777. {
  778. auto lck=shared_access();
  779. if(super::capacity()==0)return 0;
  780. else return float(unprotected_size())/
  781. float(super::capacity());
  782. }
  783. using super::max_load_factor;
  784. std::size_t max_load()const noexcept
  785. {
  786. auto lck=shared_access();
  787. return super::max_load();
  788. }
  789. void rehash(std::size_t n)
  790. {
  791. auto lck=exclusive_access();
  792. super::rehash(n);
  793. }
  794. void reserve(std::size_t n)
  795. {
  796. auto lck=exclusive_access();
  797. super::reserve(n);
  798. }
  799. template<typename Predicate>
  800. friend std::size_t erase_if(concurrent_table& x,Predicate&& pr)
  801. {
  802. return x.erase_if(std::forward<Predicate>(pr));
  803. }
  804. friend bool operator==(const concurrent_table& x,const concurrent_table& y)
  805. {
  806. auto lck=exclusive_access(x,y);
  807. return static_cast<const super&>(x)==static_cast<const super&>(y);
  808. }
  809. friend bool operator!=(const concurrent_table& x,const concurrent_table& y)
  810. {
  811. return !(x==y);
  812. }
  813. private:
  814. template<typename,typename,typename,typename> friend class concurrent_table;
  815. using mutex_type=rw_spinlock;
  816. using multimutex_type=multimutex<mutex_type,128>; // TODO: adapt 128 to the machine
  817. using shared_lock_guard=reentrancy_checked<shared_lock<mutex_type>>;
  818. using exclusive_lock_guard=reentrancy_checked<lock_guard<multimutex_type>>;
  819. using exclusive_bilock_guard=
  820. reentrancy_bichecked<scoped_bilock<multimutex_type>>;
  821. using group_shared_lock_guard=typename group_access::shared_lock_guard;
  822. using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard;
  823. using group_insert_counter_type=typename group_access::insert_counter_type;
  824. concurrent_table(const concurrent_table& x,exclusive_lock_guard):
  825. super{x}{}
  826. concurrent_table(concurrent_table&& x,exclusive_lock_guard):
  827. super{std::move(x)}{}
  828. concurrent_table(
  829. const concurrent_table& x,const Allocator& al_,exclusive_lock_guard):
  830. super{x,al_}{}
  831. concurrent_table(
  832. concurrent_table&& x,const Allocator& al_,exclusive_lock_guard):
  833. super{std::move(x),al_}{}
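/* Container-level lock. shared_access() takes just one spinlock of the
 * mutexes array (chosen per thread via thread_counter), so concurrent readers
 * touch different cache lines; exclusive_access() locks the whole multimutex
 * and is reserved for rehashing and container-wide operations.
 */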
  834. inline shared_lock_guard shared_access()const
  835. {
  836. thread_local auto id=(++thread_counter)%mutexes.size();
  837. return shared_lock_guard{this,mutexes[id]};
  838. }
  839. inline exclusive_lock_guard exclusive_access()const
  840. {
  841. return exclusive_lock_guard{this,mutexes};
  842. }
  843. static inline exclusive_bilock_guard exclusive_access(
  844. const concurrent_table& x,const concurrent_table& y)
  845. {
  846. return {&x,&y,x.mutexes,y.mutexes};
  847. }
  848. template<typename Hash2,typename Pred2>
  849. static inline exclusive_bilock_guard exclusive_access(
  850. const concurrent_table& x,
  851. const concurrent_table<TypePolicy,Hash2,Pred2,Allocator>& y)
  852. {
  853. return {&x,&y,x.mutexes,y.mutexes};
  854. }
  855. /* Tag-dispatched shared/exclusive group access */
  856. using group_shared=std::false_type;
  857. using group_exclusive=std::true_type;
  858. inline group_shared_lock_guard access(group_shared,std::size_t pos)const
  859. {
  860. return this->arrays.group_accesses()[pos].shared_access();
  861. }
  862. inline group_exclusive_lock_guard access(
  863. group_exclusive,std::size_t pos)const
  864. {
  865. return this->arrays.group_accesses()[pos].exclusive_access();
  866. }
  867. inline group_insert_counter_type& insert_counter(std::size_t pos)const
  868. {
  869. return this->arrays.group_accesses()[pos].insert_counter();
  870. }
  871. /* Const casts value_type& according to the level of group access for
  872. * safe passing to visitation functions. When type_policy is set-like,
  873. * access is always const regardless of group access.
  874. */
  875. static inline const value_type&
  876. cast_for(group_shared,value_type& x){return x;}
  877. static inline typename std::conditional<
  878. std::is_same<key_type,value_type>::value,
  879. const value_type&,
  880. value_type&
  881. >::type
  882. cast_for(group_exclusive,value_type& x){return x;}
  883. struct erase_on_exit
  884. {
  885. erase_on_exit(
  886. concurrent_table& x_,
  887. group_type* pg_,unsigned int pos_,element_type* p_):
  888. x(x_),pg(pg_),pos(pos_),p(p_){}
  889. ~erase_on_exit(){if(!rollback_)x.super::erase(pg,pos,p);}
  890. void rollback(){rollback_=true;}
  891. concurrent_table &x;
  892. group_type *pg;
  893. unsigned int pos;
  894. element_type *p;
  895. bool rollback_=false;
  896. };
  897. template<typename GroupAccessMode,typename Key,typename F>
  898. BOOST_FORCEINLINE std::size_t visit_impl(
  899. GroupAccessMode access_mode,const Key& x,F&& f)const
  900. {
  901. auto lck=shared_access();
  902. auto hash=this->hash_for(x);
  903. return unprotected_visit(
  904. access_mode,x,this->position_for(hash),hash,std::forward<F>(f));
  905. }
  906. template<typename GroupAccessMode,typename FwdIterator,typename F>
  907. BOOST_FORCEINLINE
  908. std::size_t bulk_visit_impl(
  909. GroupAccessMode access_mode,FwdIterator first,FwdIterator last,F&& f)const
  910. {
  911. auto lck=shared_access();
  912. std::size_t res=0;
  913. auto n=static_cast<std::size_t>(std::distance(first,last));
  914. while(n){
  915. auto m=n<2*bulk_visit_size?n:bulk_visit_size;
  916. res+=unprotected_bulk_visit(access_mode,first,m,std::forward<F>(f));
  917. n-=m;
  918. std::advance(
  919. first,
  920. static_cast<
  921. typename std::iterator_traits<FwdIterator>::difference_type>(m));
  922. }
  923. return res;
  924. }
  925. template<typename GroupAccessMode,typename F>
  926. std::size_t visit_all_impl(GroupAccessMode access_mode,F&& f)const
  927. {
  928. auto lck=shared_access();
  929. std::size_t res=0;
  930. for_all_elements(access_mode,[&](element_type* p){
  931. f(cast_for(access_mode,type_policy::value_from(*p)));
  932. ++res;
  933. });
  934. return res;
  935. }
  936. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  937. template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  938. void visit_all_impl(
  939. GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
  940. {
  941. auto lck=shared_access();
  942. for_all_elements(
  943. access_mode,std::forward<ExecutionPolicy>(policy),
  944. [&](element_type* p){
  945. f(cast_for(access_mode,type_policy::value_from(*p)));
  946. });
  947. }
  948. #endif
  949. template<typename GroupAccessMode,typename F>
  950. bool visit_while_impl(GroupAccessMode access_mode,F&& f)const
  951. {
  952. auto lck=shared_access();
  953. return for_all_elements_while(access_mode,[&](element_type* p){
  954. return f(cast_for(access_mode,type_policy::value_from(*p)));
  955. });
  956. }
  957. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  958. template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  959. bool visit_while_impl(
  960. GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
  961. {
  962. auto lck=shared_access();
  963. return for_all_elements_while(
  964. access_mode,std::forward<ExecutionPolicy>(policy),
  965. [&](element_type* p){
  966. return f(cast_for(access_mode,type_policy::value_from(*p)));
  967. });
  968. }
  969. #endif
  970. template<typename GroupAccessMode,typename Key,typename F>
  971. BOOST_FORCEINLINE std::size_t unprotected_visit(
  972. GroupAccessMode access_mode,
  973. const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
  974. {
  975. return unprotected_internal_visit(
  976. access_mode,x,pos0,hash,
  977. [&](group_type*,unsigned int,element_type* p)
  978. {f(cast_for(access_mode,type_policy::value_from(*p)));});
  979. }
  980. #if defined(BOOST_MSVC)
  981. /* warning: forcing value to bool 'true' or 'false' in bool(pred()...) */
  982. #pragma warning(push)
  983. #pragma warning(disable:4800)
  984. #endif
  985. template<typename GroupAccessMode,typename Key,typename F>
  986. BOOST_FORCEINLINE std::size_t unprotected_internal_visit(
  987. GroupAccessMode access_mode,
  988. const Key& x,std::size_t pos0,std::size_t hash,F&& f)const
  989. {
  990. prober pb(pos0);
  991. do{
  992. auto pos=pb.get();
  993. auto pg=this->arrays.groups()+pos;
  994. auto mask=pg->match(hash);
  995. if(mask){
  996. auto p=this->arrays.elements()+pos*N;
  997. BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
  998. auto lck=access(access_mode,pos);
  999. do{
  1000. auto n=unchecked_countr_zero(mask);
  1001. if(BOOST_LIKELY(
  1002. pg->is_occupied(n)&&bool(this->pred()(x,this->key_from(p[n]))))){
  1003. f(pg,n,p+n);
  1004. return 1;
  1005. }
  1006. mask&=mask-1;
  1007. }while(mask);
  1008. }
  1009. if(BOOST_LIKELY(pg->is_not_overflowed(hash))){
  1010. return 0;
  1011. }
  1012. }
  1013. while(BOOST_LIKELY(pb.next(this->arrays.groups_size_mask)));
  1014. return 0;
  1015. }
  1016. template<typename GroupAccessMode,typename FwdIterator,typename F>
  1017. BOOST_FORCEINLINE std::size_t unprotected_bulk_visit(
  1018. GroupAccessMode access_mode,FwdIterator first,std::size_t m,F&& f)const
  1019. {
  1020. BOOST_ASSERT(m<2*bulk_visit_size);
  1021. std::size_t res=0,
  1022. hashes[2*bulk_visit_size-1],
  1023. positions[2*bulk_visit_size-1];
  1024. int masks[2*bulk_visit_size-1];
  1025. auto it=first;
  1026. for(auto i=m;i--;++it){
  1027. auto hash=hashes[i]=this->hash_for(*it);
  1028. auto pos=positions[i]=this->position_for(hash);
  1029. BOOST_UNORDERED_PREFETCH(this->arrays.groups()+pos);
  1030. }
  1031. for(auto i=m;i--;){
  1032. auto hash=hashes[i];
  1033. auto pos=positions[i];
  1034. auto mask=masks[i]=(this->arrays.groups()+pos)->match(hash);
  1035. if(mask){
  1036. BOOST_UNORDERED_PREFETCH(this->arrays.group_accesses()+pos);
  1037. BOOST_UNORDERED_PREFETCH(
  1038. this->arrays.elements()+pos*N+unchecked_countr_zero(mask));
  1039. }
  1040. }
  1041. it=first;
  1042. for(auto i=m;i--;++it){
  1043. auto pos=positions[i];
  1044. prober pb(pos);
  1045. auto pg=this->arrays.groups()+pos;
  1046. auto mask=masks[i];
  1047. element_type *p;
  1048. if(!mask)goto post_mask;
  1049. p=this->arrays.elements()+pos*N;
  1050. for(;;){
  1051. {
  1052. auto lck=access(access_mode,pos);
  1053. do{
  1054. auto n=unchecked_countr_zero(mask);
  1055. if(BOOST_LIKELY(
  1056. pg->is_occupied(n)&&
  1057. bool(this->pred()(*it,this->key_from(p[n]))))){
  1058. f(cast_for(access_mode,type_policy::value_from(p[n])));
  1059. ++res;
  1060. goto next_key;
  1061. }
  1062. mask&=mask-1;
  1063. }while(mask);
  1064. }
  1065. post_mask:
  1066. do{
  1067. if(BOOST_LIKELY(pg->is_not_overflowed(hashes[i]))||
  1068. BOOST_UNLIKELY(!pb.next(this->arrays.groups_size_mask))){
  1069. goto next_key;
  1070. }
  1071. pos=pb.get();
  1072. pg=this->arrays.groups()+pos;
  1073. mask=pg->match(hashes[i]);
  1074. }while(!mask);
  1075. p=this->arrays.elements()+pos*N;
  1076. BOOST_UNORDERED_PREFETCH_ELEMENTS(p,N);
  1077. }
  1078. next_key:;
  1079. }
  1080. return res;
  1081. }
  1082. #if defined(BOOST_MSVC)
  1083. #pragma warning(pop) /* C4800 */
  1084. #endif
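/* reserve_size may transiently push size_ctrl.size past size_ctrl.ml before
 * rolling back, hence the clamping to the maximum load below.
 */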
  1085. std::size_t unprotected_size()const
  1086. {
  1087. std::size_t m=this->size_ctrl.ml;
  1088. std::size_t s=this->size_ctrl.size;
  1089. return s<=m?s:m;
  1090. }
  1091. template<typename... Args>
  1092. BOOST_FORCEINLINE bool construct_and_emplace(Args&&... args)
  1093. {
  1094. return construct_and_emplace_or_visit(
  1095. group_shared{},[](const value_type&){},std::forward<Args>(args)...);
  1096. }
  1097. struct call_construct_and_emplace_or_visit
  1098. {
  1099. template<typename... Args>
  1100. BOOST_FORCEINLINE bool operator()(
  1101. concurrent_table* this_,Args&&... args)const
  1102. {
  1103. return this_->construct_and_emplace_or_visit(
  1104. std::forward<Args>(args)...);
  1105. }
  1106. };
  1107. template<typename GroupAccessMode,typename... Args>
  1108. BOOST_FORCEINLINE bool construct_and_emplace_or_visit_flast(
  1109. GroupAccessMode access_mode,Args&&... args)
  1110. {
  1111. return mp11::tuple_apply(
  1112. call_construct_and_emplace_or_visit{},
  1113. std::tuple_cat(
  1114. std::make_tuple(this,access_mode),
  1115. tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
  1116. )
  1117. );
  1118. }
  1119. template<typename GroupAccessMode,typename F,typename... Args>
  1120. BOOST_FORCEINLINE bool construct_and_emplace_or_visit(
  1121. GroupAccessMode access_mode,F&& f,Args&&... args)
  1122. {
  1123. auto lck=shared_access();
  1124. auto x=alloc_make_insert_type<type_policy>(
  1125. this->al(),std::forward<Args>(args)...);
  1126. int res=unprotected_norehash_emplace_or_visit(
  1127. access_mode,std::forward<F>(f),type_policy::move(x.value()));
  1128. if(BOOST_LIKELY(res>=0))return res!=0;
  1129. lck.unlock();
  1130. rehash_if_full();
  1131. return noinline_emplace_or_visit(
  1132. access_mode,std::forward<F>(f),type_policy::move(x.value()));
  1133. }
  1134. template<typename... Args>
  1135. BOOST_FORCEINLINE bool emplace_impl(Args&&... args)
  1136. {
  1137. return emplace_or_visit_impl(
  1138. group_shared{},[](const value_type&){},std::forward<Args>(args)...);
  1139. }
  1140. template<typename GroupAccessMode,typename F,typename... Args>
  1141. BOOST_NOINLINE bool noinline_emplace_or_visit(
  1142. GroupAccessMode access_mode,F&& f,Args&&... args)
  1143. {
  1144. return emplace_or_visit_impl(
  1145. access_mode,std::forward<F>(f),std::forward<Args>(args)...);
  1146. }
  1147. struct call_emplace_or_visit_impl
  1148. {
  1149. template<typename... Args>
  1150. BOOST_FORCEINLINE bool operator()(
  1151. concurrent_table* this_,Args&&... args)const
  1152. {
  1153. return this_->emplace_or_visit_impl(std::forward<Args>(args)...);
  1154. }
  1155. };
  1156. template<typename GroupAccessMode,typename... Args>
  1157. BOOST_FORCEINLINE bool emplace_or_visit_flast(
  1158. GroupAccessMode access_mode,Args&&... args)
  1159. {
  1160. return mp11::tuple_apply(
  1161. call_emplace_or_visit_impl{},
  1162. std::tuple_cat(
  1163. std::make_tuple(this,access_mode),
  1164. tuple_rotate_right(std::forward_as_tuple(std::forward<Args>(args)...))
  1165. )
  1166. );
  1167. }
  1168. template<typename GroupAccessMode,typename F,typename... Args>
  1169. BOOST_FORCEINLINE bool emplace_or_visit_impl(
  1170. GroupAccessMode access_mode,F&& f,Args&&... args)
  1171. {
  1172. for(;;){
  1173. {
  1174. auto lck=shared_access();
  1175. int res=unprotected_norehash_emplace_or_visit(
  1176. access_mode,std::forward<F>(f),std::forward<Args>(args)...);
  1177. if(BOOST_LIKELY(res>=0))return res!=0;
  1178. }
  1179. rehash_if_full();
  1180. }
  1181. }
  1182. template<typename... Args>
  1183. BOOST_FORCEINLINE bool unprotected_emplace(Args&&... args)
  1184. {
  1185. const auto &k=this->key_from(std::forward<Args>(args)...);
  1186. auto hash=this->hash_for(k);
  1187. auto pos0=this->position_for(hash);
  1188. if(this->find(k,pos0,hash))return false;
  1189. if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
  1190. this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...);
  1191. }
  1192. else{
  1193. this->unchecked_emplace_with_rehash(hash,std::forward<Args>(args)...);
  1194. }
  1195. return true;
  1196. }
  1197. struct reserve_size
  1198. {
  1199. reserve_size(concurrent_table& x_):x(x_)
  1200. {
  1201. size_=++x.size_ctrl.size;
  1202. }
  1203. ~reserve_size()
  1204. {
  1205. if(!commit_)--x.size_ctrl.size;
  1206. }
  1207. bool succeeded()const{return size_<=x.size_ctrl.ml;}
  1208. void commit(){commit_=true;}
  1209. concurrent_table &x;
  1210. std::size_t size_;
  1211. bool commit_=false;
  1212. };
  1213. struct reserve_slot
  1214. {
  1215. reserve_slot(group_type* pg_,std::size_t pos_,std::size_t hash):
  1216. pg{pg_},pos{pos_}
  1217. {
  1218. pg->set(pos,hash);
  1219. }
  1220. ~reserve_slot()
  1221. {
  1222. if(!commit_)pg->reset(pos);
  1223. }
  1224. void commit(){commit_=true;}
  1225. group_type *pg;
  1226. std::size_t pos;
  1227. bool commit_=false;
  1228. };
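/* Core of the optimistic insertion algorithm described in the class-level
 * comment. Returns 1 if a new element was inserted, 0 if an equivalent
 * element was found (and visited through f), and -1 if the maximum load has
 * been reached, in which case the caller must rehash and retry.
 */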
  1229. template<typename GroupAccessMode,typename F,typename... Args>
  1230. BOOST_FORCEINLINE int
  1231. unprotected_norehash_emplace_or_visit(
  1232. GroupAccessMode access_mode,F&& f,Args&&... args)
  1233. {
  1234. const auto &k=this->key_from(std::forward<Args>(args)...);
  1235. auto hash=this->hash_for(k);
  1236. auto pos0=this->position_for(hash);
  1237. for(;;){
  1238. startover:
  1239. boost::uint32_t counter=insert_counter(pos0);
  1240. if(unprotected_visit(
  1241. access_mode,k,pos0,hash,std::forward<F>(f)))return 0;
  1242. reserve_size rsize(*this);
  1243. if(BOOST_LIKELY(rsize.succeeded())){
  1244. for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){
  1245. auto pos=pb.get();
  1246. auto pg=this->arrays.groups()+pos;
  1247. auto lck=access(group_exclusive{},pos);
  1248. auto mask=pg->match_available();
  1249. if(BOOST_LIKELY(mask!=0)){
  1250. auto n=unchecked_countr_zero(mask);
  1251. reserve_slot rslot{pg,n,hash};
  1252. if(BOOST_UNLIKELY(insert_counter(pos0)++!=counter)){
  1253. /* other thread inserted from pos0, need to start over */
  1254. goto startover;
  1255. }
  1256. auto p=this->arrays.elements()+pos*N+n;
  1257. this->construct_element(p,std::forward<Args>(args)...);
  1258. rslot.commit();
  1259. rsize.commit();
  1260. return 1;
  1261. }
  1262. pg->mark_overflow(hash);
  1263. }
  1264. }
  1265. else return -1;
  1266. }
  1267. }
  1268. void rehash_if_full()
  1269. {
  1270. auto lck=exclusive_access();
  1271. if(this->size_ctrl.size==this->size_ctrl.ml){
  1272. this->unchecked_rehash_for_growth();
  1273. }
  1274. }
  1275. template<typename GroupAccessMode,typename F>
  1276. auto for_all_elements(GroupAccessMode access_mode,F f)const
  1277. ->decltype(f(nullptr),void())
  1278. {
  1279. for_all_elements(
  1280. access_mode,[&](group_type*,unsigned int,element_type* p){f(p);});
  1281. }
  1282. template<typename GroupAccessMode,typename F>
  1283. auto for_all_elements(GroupAccessMode access_mode,F f)const
  1284. ->decltype(f(nullptr,0,nullptr),void())
  1285. {
  1286. for_all_elements_while(
  1287. access_mode,[&](group_type* pg,unsigned int n,element_type* p)
  1288. {f(pg,n,p);return true;});
  1289. }
  1290. template<typename GroupAccessMode,typename F>
  1291. auto for_all_elements_while(GroupAccessMode access_mode,F f)const
  1292. ->decltype(f(nullptr),bool())
  1293. {
  1294. return for_all_elements_while(
  1295. access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);});
  1296. }
  1297. template<typename GroupAccessMode,typename F>
  1298. auto for_all_elements_while(GroupAccessMode access_mode,F f)const
  1299. ->decltype(f(nullptr,0,nullptr),bool())
  1300. {
  1301. auto p=this->arrays.elements();
  1302. if(p){
  1303. for(auto pg=this->arrays.groups(),last=pg+this->arrays.groups_size_mask+1;
  1304. pg!=last;++pg,p+=N){
  1305. auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups()));
  1306. auto mask=this->match_really_occupied(pg,last);
  1307. while(mask){
  1308. auto n=unchecked_countr_zero(mask);
  1309. if(!f(pg,n,p+n))return false;
  1310. mask&=mask-1;
  1311. }
  1312. }
  1313. }
  1314. return true;
  1315. }
  1316. #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
  1317. template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  1318. auto for_all_elements(
  1319. GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
  1320. ->decltype(f(nullptr),void())
  1321. {
  1322. for_all_elements(
  1323. access_mode,std::forward<ExecutionPolicy>(policy),
  1324. [&](group_type*,unsigned int,element_type* p){f(p);});
  1325. }
  1326. template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  1327. auto for_all_elements(
  1328. GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
  1329. ->decltype(f(nullptr,0,nullptr),void())
  1330. {
  1331. if(!this->arrays.elements())return;
  1332. auto first=this->arrays.groups(),
  1333. last=first+this->arrays.groups_size_mask+1;
  1334. std::for_each(std::forward<ExecutionPolicy>(policy),first,last,
  1335. [&,this](group_type& g){
  1336. auto pos=static_cast<std::size_t>(&g-first);
  1337. auto p=this->arrays.elements()+pos*N;
  1338. auto lck=access(access_mode,pos);
  1339. auto mask=this->match_really_occupied(&g,last);
  1340. while(mask){
  1341. auto n=unchecked_countr_zero(mask);
  1342. f(&g,n,p+n);
  1343. mask&=mask-1;
  1344. }
  1345. }
  1346. );
  1347. }
  1348. template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
  1349. bool for_all_elements_while(
  1350. GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
  1351. {
  1352. if(!this->arrays.elements())return true;
  1353. auto first=this->arrays.groups(),
  1354. last=first+this->arrays.groups_size_mask+1;
  1355. return std::all_of(std::forward<ExecutionPolicy>(policy),first,last,
  1356. [&,this](group_type& g){
  1357. auto pos=static_cast<std::size_t>(&g-first);
  1358. auto p=this->arrays.elements()+pos*N;
  1359. auto lck=access(access_mode,pos);
  1360. auto mask=this->match_really_occupied(&g,last);
  1361. while(mask){
  1362. auto n=unchecked_countr_zero(mask);
  1363. if(!f(p+n))return false;
  1364. mask&=mask-1;
  1365. }
  1366. return true;
  1367. }
  1368. );
  1369. }
  1370. #endif
  1371. friend class boost::serialization::access;
  1372. template<typename Archive>
  1373. void serialize(Archive& ar,unsigned int version)
  1374. {
  1375. core::split_member(ar,*this,version);
  1376. }
  1377. template<typename Archive>
  1378. void save(Archive& ar,unsigned int version)const
  1379. {
  1380. save(
  1381. ar,version,
  1382. std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
  1383. }
  1384. template<typename Archive>
  1385. void save(Archive& ar,unsigned int,std::true_type /* set */)const
  1386. {
  1387. auto lck=exclusive_access();
  1388. const std::size_t s=super::size();
  1389. const serialization_version<value_type> value_version;
  1390. ar<<core::make_nvp("count",s);
  1391. ar<<core::make_nvp("value_version",value_version);
  1392. super::for_all_elements([&,this](element_type* p){
  1393. auto& x=type_policy::value_from(*p);
  1394. core::save_construct_data_adl(ar,std::addressof(x),value_version);
  1395. ar<<serialization::make_nvp("item",x);
  1396. });
  1397. }
  1398. template<typename Archive>
  1399. void save(Archive& ar,unsigned int,std::false_type /* map */)const
  1400. {
  1401. using raw_key_type=typename std::remove_const<key_type>::type;
  1402. using raw_mapped_type=typename std::remove_const<
  1403. typename TypePolicy::mapped_type>::type;
  1404. auto lck=exclusive_access();
  1405. const std::size_t s=super::size();
  1406. const serialization_version<raw_key_type> key_version;
  1407. const serialization_version<raw_mapped_type> mapped_version;
  1408. ar<<core::make_nvp("count",s);
  1409. ar<<core::make_nvp("key_version",key_version);
  1410. ar<<core::make_nvp("mapped_version",mapped_version);
  1411. super::for_all_elements([&,this](element_type* p){
  1412. /* To remain lib-independent from Boost.Serialization and not rely on
  1413. * the user having included the serialization code for std::pair
  1414. * (boost/serialization/utility.hpp), we serialize the key and the
  1415. * mapped value separately.
  1416. */
  1417. auto& x=type_policy::value_from(*p);
  1418. core::save_construct_data_adl(
  1419. ar,std::addressof(x.first),key_version);
  1420. ar<<serialization::make_nvp("key",x.first);
  1421. core::save_construct_data_adl(
  1422. ar,std::addressof(x.second),mapped_version);
  1423. ar<<serialization::make_nvp("mapped",x.second);
  1424. });
  1425. }
  1426. template<typename Archive>
  1427. void load(Archive& ar,unsigned int version)
  1428. {
  1429. load(
  1430. ar,version,
  1431. std::integral_constant<bool,std::is_same<key_type,value_type>::value>{});
  1432. }
  1433. template<typename Archive>
  1434. void load(Archive& ar,unsigned int,std::true_type /* set */)
  1435. {
  1436. auto lck=exclusive_access();
  1437. std::size_t s;
  1438. serialization_version<value_type> value_version;
  1439. ar>>core::make_nvp("count",s);
  1440. ar>>core::make_nvp("value_version",value_version);
  1441. super::clear();
  1442. super::reserve(s);
  1443. for(std::size_t n=0;n<s;++n){
  1444. archive_constructed<value_type> value("item",ar,value_version);
  1445. auto& x=value.get();
  1446. auto hash=this->hash_for(x);
  1447. auto pos0=this->position_for(hash);
  1448. if(this->find(x,pos0,hash))throw_exception(bad_archive_exception());
  1449. auto loc=this->unchecked_emplace_at(pos0,hash,std::move(x));
  1450. ar.reset_object_address(std::addressof(*loc.p),std::addressof(x));
  1451. }
  1452. }
  1453. template<typename Archive>
  1454. void load(Archive& ar,unsigned int,std::false_type /* map */)
  1455. {
  1456. using raw_key_type=typename std::remove_const<key_type>::type;
  1457. using raw_mapped_type=typename std::remove_const<
  1458. typename TypePolicy::mapped_type>::type;
  1459. auto lck=exclusive_access();
  1460. std::size_t s;
  1461. serialization_version<raw_key_type> key_version;
  1462. serialization_version<raw_mapped_type> mapped_version;
  1463. ar>>core::make_nvp("count",s);
  1464. ar>>core::make_nvp("key_version",key_version);
  1465. ar>>core::make_nvp("mapped_version",mapped_version);
  1466. super::clear();
  1467. super::reserve(s);
  1468. for(std::size_t n=0;n<s;++n){
  1469. archive_constructed<raw_key_type> key("key",ar,key_version);
  1470. archive_constructed<raw_mapped_type> mapped("mapped",ar,mapped_version);
  1471. auto& k=key.get();
  1472. auto& m=mapped.get();
  1473. auto hash=this->hash_for(k);
  1474. auto pos0=this->position_for(hash);
  1475. if(this->find(k,pos0,hash))throw_exception(bad_archive_exception());
  1476. auto loc=this->unchecked_emplace_at(pos0,hash,std::move(k),std::move(m));
  1477. ar.reset_object_address(std::addressof(loc.p->first),std::addressof(k));
  1478. ar.reset_object_address(std::addressof(loc.p->second),std::addressof(m));
  1479. }
  1480. }
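/* monotonically increasing counter used by shared_access() to assign each
 * thread its own spinlock within mutexes */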
  1481. static std::atomic<std::size_t> thread_counter;
  1482. mutable multimutex_type mutexes;
  1483. };
  1484. template<typename T,typename H,typename P,typename A>
  1485. std::atomic<std::size_t> concurrent_table<T,H,P,A>::thread_counter={};
  1486. #if defined(BOOST_MSVC)
  1487. #pragma warning(pop) /* C4714 */
  1488. #endif
  1489. #include <boost/unordered/detail/foa/restore_wshadow.hpp>
  1490. } /* namespace foa */
  1491. } /* namespace detail */
  1492. } /* namespace unordered */
  1493. } /* namespace boost */
  1494. #endif