// boost/lockfree/stack.hpp
1 // Copyright (C) 2008-2013 Tim Blechmann 2 // 3 // Distributed under the Boost Software License, Version 1.0. (See 4 // accompanying file LICENSE_1_0.txt or copy at 5 // http://www.boost.org/LICENSE_1_0.txt) 6 7 #ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED 8 #define BOOST_LOCKFREE_STACK_HPP_INCLUDED 9 10 #include <boost/assert.hpp> 11 #include <boost/checked_delete.hpp> 12 #include <boost/integer_traits.hpp> 13 #ifdef BOOST_NO_CXX11_DELETED_FUNCTIONS 14 #include <boost/noncopyable.hpp> 15 #endif 16 #include <boost/static_assert.hpp> 17 #include <boost/tuple/tuple.hpp> 18 #include <boost/type_traits/has_trivial_assign.hpp> 19 #include <boost/type_traits/has_trivial_destructor.hpp> 20 21 #include <boost/lockfree/detail/atomic.hpp> 22 #include <boost/lockfree/detail/copy_payload.hpp> 23 #include <boost/lockfree/detail/freelist.hpp> 24 #include <boost/lockfree/detail/parameter.hpp> 25 #include <boost/lockfree/detail/tagged_ptr.hpp> 26 27 namespace boost { 28 namespace lockfree { 29 namespace detail { 30 31 typedef parameter::parameters<boost::parameter::optional<tag::allocator>, 32 boost::parameter::optional<tag::capacity> 33 > stack_signature; 34 35 } 36 37 /** The stack class provides a multi-writer/multi-reader stack, pushing and popping is lock-free, 38 * construction/destruction has to be synchronized. It uses a freelist for memory management, 39 * freed nodes are pushed to the freelist and not returned to the OS before the stack is destroyed. 40 * 41 * \b Policies: 42 * 43 * - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br> 44 * Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br> 45 * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed 46 * by array indexing. 
This limits the possible size of the stack to the number of elements that can be addressed by the index 47 * type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way 48 * to achieve lock-freedom. 49 * 50 * - \c boost::lockfree::capacity<>, optional <br> 51 * If this template argument is passed to the options, the size of the stack is set at compile-time. <br> 52 * It this option implies \c fixed_sized<true> 53 * 54 * - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br> 55 * Specifies the allocator that is used for the internal freelist 56 * 57 * \b Requirements: 58 * - T must have a copy constructor 59 * */ 60 #ifndef BOOST_DOXYGEN_INVOKED 61 template <typename T, 62 class A0 = boost::parameter::void_, 63 class A1 = boost::parameter::void_, 64 class A2 = boost::parameter::void_> 65 #else 66 template <typename T, ...Options> 67 #endif 68 class stack 69 #ifdef BOOST_NO_CXX11_DELETED_FUNCTIONS 70 : boost::noncopyable 71 #endif 72 { 73 private: 74 #ifndef BOOST_DOXYGEN_INVOKED 75 BOOST_STATIC_ASSERT(boost::has_trivial_assign<T>::value); 76 BOOST_STATIC_ASSERT(boost::has_trivial_destructor<T>::value); 77 78 typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args; 79 80 static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity; 81 static const size_t capacity = detail::extract_capacity<bound_args>::capacity; 82 static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value; 83 static const bool node_based = !(has_capacity || fixed_sized); 84 static const bool compile_time_sized = has_capacity; 85 86 struct node 87 { 88 node(T const & val): 89 v(val) 90 {} 91 92 typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t; 93 handle_t next; 94 const T v; 95 }; 96 97 typedef typename detail::extract_allocator<bound_args, node>::type node_allocator; 98 typedef typename 
detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t; 99 typedef typename pool_t::tagged_node_handle tagged_node_handle; 100 101 // check compile-time capacity 102 BOOST_STATIC_ASSERT((mpl::if_c<has_capacity, 103 mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>, 104 mpl::true_ 105 >::type::value)); 106 107 struct implementation_defined 108 { 109 typedef node_allocator allocator; 110 typedef std::size_t size_type; 111 }; 112 113 #endif 114 115 #ifndef BOOST_NO_CXX11_DELETED_FUNCTIONS 116 stack(stack const &) = delete; 117 stack(stack &&) = delete; 118 const stack& operator=( const stack& ) = delete; 119 #endif 120 121 public: 122 typedef T value_type; 123 typedef typename implementation_defined::allocator allocator; 124 typedef typename implementation_defined::size_type size_type; 125 126 /** 127 * \return true, if implementation is lock-free. 128 * 129 * \warning It only checks, if the top stack node and the freelist can be modified in a lock-free manner. 130 * On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics, 131 * there is no possibility to provide a completely accurate implementation, because one would need to test 132 * every internal node, which is impossible if further nodes will be allocated from the operating system. 133 * 134 * */ 135 bool is_lock_free (void) const 136 { 137 return tos.is_lock_free() && pool.is_lock_free(); 138 } 139 140 //! 
Construct stack 141 // @{ 142 stack(void): 143 pool(node_allocator(), capacity) 144 { 145 BOOST_ASSERT(has_capacity); 146 initialize(); 147 } 148 149 template <typename U> 150 explicit stack(typename node_allocator::template rebind<U>::other const & alloc): 151 pool(alloc, capacity) 152 { 153 BOOST_STATIC_ASSERT(has_capacity); 154 initialize(); 155 } 156 157 explicit stack(allocator const & alloc): 158 pool(alloc, capacity) 159 { 160 BOOST_ASSERT(has_capacity); 161 initialize(); 162 } 163 // @} 164 165 //! Construct stack, allocate n nodes for the freelist. 166 // @{ 167 explicit stack(size_type n): 168 pool(node_allocator(), n) 169 { 170 BOOST_ASSERT(!has_capacity); 171 initialize(); 172 } 173 174 template <typename U> 175 stack(size_type n, typename node_allocator::template rebind<U>::other const & alloc): 176 pool(alloc, n) 177 { 178 BOOST_STATIC_ASSERT(!has_capacity); 179 initialize(); 180 } 181 // @} 182 183 /** Allocate n nodes for freelist 184 * 185 * \pre only valid if no capacity<> argument given 186 * \note thread-safe, may block if memory allocator blocks 187 * 188 * */ 189 void reserve(size_type n) 190 { 191 BOOST_STATIC_ASSERT(!has_capacity); 192 pool.template reserve<true>(n); 193 } 194 195 /** Allocate n nodes for freelist 196 * 197 * \pre only valid if no capacity<> argument given 198 * \note not thread-safe, may block if memory allocator blocks 199 * 200 * */ 201 void reserve_unsafe(size_type n) 202 { 203 BOOST_STATIC_ASSERT(!has_capacity); 204 pool.template reserve<false>(n); 205 } 206 207 /** Destroys stack, free all nodes from freelist. 
208 * 209 * \note not thread-safe 210 * 211 * */ 212 ~stack(void) 213 { 214 T dummy; 215 while(unsynchronized_pop(dummy)) 216 {} 217 } 218 219 private: 220 #ifndef BOOST_DOXYGEN_INVOKED 221 void initialize(void) 222 { 223 tos.store(tagged_node_handle(pool.null_handle(), 0)); 224 } 225 226 void link_nodes_atomic(node * new_top_node, node * end_node) 227 { 228 tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed); 229 for (;;) { 230 tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag()); 231 end_node->next = pool.get_handle(old_tos); 232 233 if (tos.compare_exchange_weak(old_tos, new_tos)) 234 break; 235 } 236 } 237 238 void link_nodes_unsafe(node * new_top_node, node * end_node) 239 { 240 tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed); 241 242 tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag()); 243 end_node->next = pool.get_pointer(old_tos); 244 245 tos.store(new_tos, memory_order_relaxed); 246 } 247 248 template <bool Threadsafe, bool Bounded, typename ConstIterator> 249 tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret) 250 { 251 ConstIterator it = begin; 252 node * end_node = pool.template construct<Threadsafe, Bounded>(*it++); 253 if (end_node == NULL) { 254 ret = begin; 255 return make_tuple<node*, node*>(NULL, NULL); 256 } 257 258 node * new_top_node = end_node; 259 end_node->next = NULL; 260 261 try { 262 /* link nodes */ 263 for (; it != end; ++it) { 264 node * newnode = pool.template construct<Threadsafe, Bounded>(*it); 265 if (newnode == NULL) 266 break; 267 newnode->next = new_top_node; 268 new_top_node = newnode; 269 } 270 } catch (...) 
{ 271 for (node * current_node = new_top_node; current_node != NULL;) { 272 node * next = current_node->next; 273 pool.template destruct<Threadsafe>(current_node); 274 current_node = next; 275 } 276 throw; 277 } 278 ret = it; 279 return make_tuple(new_top_node, end_node); 280 } 281 #endif 282 283 public: 284 /** Pushes object t to the stack. 285 * 286 * \post object will be pushed to the stack, if internal node can be allocated 287 * \returns true, if the push operation is successful. 288 * 289 * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated 290 * from the OS. This may not be lock-free. 291 * \throws if memory allocator throws 292 * */ 293 bool push(T const & v) 294 { 295 return do_push<false>(v); 296 } 297 298 /** Pushes object t to the stack. 299 * 300 * \post object will be pushed to the stack, if internal node can be allocated 301 * \returns true, if the push operation is successful. 302 * 303 * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail 304 * */ 305 bool bounded_push(T const & v) 306 { 307 return do_push<true>(v); 308 } 309 310 #ifndef BOOST_DOXYGEN_INVOKED 311 private: 312 template <bool Bounded> 313 bool do_push(T const & v) 314 { 315 node * newnode = pool.template construct<true, Bounded>(v); 316 if (newnode == 0) 317 return false; 318 319 link_nodes_atomic(newnode, newnode); 320 return true; 321 } 322 323 template <bool Bounded, typename ConstIterator> 324 ConstIterator do_push(ConstIterator begin, ConstIterator end) 325 { 326 node * new_top_node; 327 node * end_node; 328 ConstIterator ret; 329 330 tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret); 331 if (new_top_node) 332 link_nodes_atomic(new_top_node, end_node); 333 334 return ret; 335 } 336 337 public: 338 #endif 339 340 /** Pushes as many objects from the range [begin, end) as freelist node can be allocated. 
341 * 342 * \return iterator to the first element, which has not been pushed 343 * 344 * \note Operation is applied atomically 345 * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated 346 * from the OS. This may not be lock-free. 347 * \throws if memory allocator throws 348 */ 349 template <typename ConstIterator> 350 ConstIterator push(ConstIterator begin, ConstIterator end) 351 { 352 return do_push<false, ConstIterator>(begin, end); 353 } 354 355 /** Pushes as many objects from the range [begin, end) as freelist node can be allocated. 356 * 357 * \return iterator to the first element, which has not been pushed 358 * 359 * \note Operation is applied atomically 360 * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail 361 * \throws if memory allocator throws 362 */ 363 template <typename ConstIterator> 364 ConstIterator bounded_push(ConstIterator begin, ConstIterator end) 365 { 366 return do_push<true, ConstIterator>(begin, end); 367 } 368 369 370 /** Pushes object t to the stack. 371 * 372 * \post object will be pushed to the stack, if internal node can be allocated 373 * \returns true, if the push operation is successful. 374 * 375 * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated 376 * from the OS. This may not be lock-free. 377 * \throws if memory allocator throws 378 * */ 379 bool unsynchronized_push(T const & v) 380 { 381 node * newnode = pool.template construct<false, false>(v); 382 if (newnode == 0) 383 return false; 384 385 link_nodes_unsafe(newnode, newnode); 386 return true; 387 } 388 389 /** Pushes as many objects from the range [begin, end) as freelist node can be allocated. 390 * 391 * \return iterator to the first element, which has not been pushed 392 * 393 * \note Not thread-safe. 
If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated 394 * from the OS. This may not be lock-free. 395 * \throws if memory allocator throws 396 */ 397 template <typename ConstIterator> 398 ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end) 399 { 400 node * new_top_node; 401 node * end_node; 402 ConstIterator ret; 403 404 tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret); 405 if (new_top_node) 406 link_nodes_unsafe(new_top_node, end_node); 407 408 return ret; 409 } 410 411 412 /** Pops object from stack. 413 * 414 * \post if pop operation is successful, object will be copied to ret. 415 * \returns true, if the pop operation is successful, false if stack was empty. 416 * 417 * \note Thread-safe and non-blocking 418 * 419 * */ 420 bool pop(T & ret) 421 { 422 return pop<T>(ret); 423 } 424 425 /** Pops object from stack. 426 * 427 * \pre type T must be convertible to U 428 * \post if pop operation is successful, object will be copied to ret. 429 * \returns true, if the pop operation is successful, false if stack was empty. 430 * 431 * \note Thread-safe and non-blocking 432 * 433 * */ 434 template <typename U> 435 bool pop(U & ret) 436 { 437 BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value)); 438 detail::consume_via_copy<U> consumer(ret); 439 440 return consume_one(consumer); 441 } 442 443 444 /** Pops object from stack. 445 * 446 * \post if pop operation is successful, object will be copied to ret. 447 * \returns true, if the pop operation is successful, false if stack was empty. 448 * 449 * \note Not thread-safe, but non-blocking 450 * 451 * */ 452 bool unsynchronized_pop(T & ret) 453 { 454 return unsynchronized_pop<T>(ret); 455 } 456 457 /** Pops object from stack. 458 * 459 * \pre type T must be convertible to U 460 * \post if pop operation is successful, object will be copied to ret. 
461 * \returns true, if the pop operation is successful, false if stack was empty. 462 * 463 * \note Not thread-safe, but non-blocking 464 * 465 * */ 466 template <typename U> 467 bool unsynchronized_pop(U & ret) 468 { 469 BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value)); 470 tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed); 471 node * old_tos_pointer = pool.get_pointer(old_tos); 472 473 if (!pool.get_pointer(old_tos)) 474 return false; 475 476 node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next); 477 tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_next_tag()); 478 479 tos.store(new_tos, memory_order_relaxed); 480 detail::copy_payload(old_tos_pointer->v, ret); 481 pool.template destruct<false>(old_tos); 482 return true; 483 } 484 485 /** consumes one element via a functor 486 * 487 * pops one element from the stack and applies the functor on this object 488 * 489 * \returns true, if one element was consumed 490 * 491 * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking 492 * */ 493 template <typename Functor> 494 bool consume_one(Functor & f) 495 { 496 tagged_node_handle old_tos = tos.load(detail::memory_order_consume); 497 498 for (;;) { 499 node * old_tos_pointer = pool.get_pointer(old_tos); 500 if (!old_tos_pointer) 501 return false; 502 503 tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_next_tag()); 504 505 if (tos.compare_exchange_weak(old_tos, new_tos)) { 506 f(old_tos_pointer->v); 507 pool.template destruct<true>(old_tos); 508 return true; 509 } 510 } 511 } 512 513 /// \copydoc boost::lockfree::stack::consume_one(Functor & rhs) 514 template <typename Functor> 515 bool consume_one(Functor const & f) 516 { 517 tagged_node_handle old_tos = tos.load(detail::memory_order_consume); 518 519 for (;;) { 520 node * old_tos_pointer = pool.get_pointer(old_tos); 521 if (!old_tos_pointer) 522 return false; 523 524 tagged_node_handle new_tos(old_tos_pointer->next, 
old_tos.get_next_tag()); 525 526 if (tos.compare_exchange_weak(old_tos, new_tos)) { 527 f(old_tos_pointer->v); 528 pool.template destruct<true>(old_tos); 529 return true; 530 } 531 } 532 } 533 534 /** consumes all elements via a functor 535 * 536 * sequentially pops all elements from the stack and applies the functor on each object 537 * 538 * \returns number of elements that are consumed 539 * 540 * \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking 541 * */ 542 template <typename Functor> 543 size_t consume_all(Functor & f) 544 { 545 size_t element_count = 0; 546 while (consume_one(f)) 547 element_count += 1; 548 549 return element_count; 550 } 551 552 /// \copydoc boost::lockfree::stack::consume_all(Functor & rhs) 553 template <typename Functor> 554 size_t consume_all(Functor const & f) 555 { 556 size_t element_count = 0; 557 while (consume_one(f)) 558 element_count += 1; 559 560 return element_count; 561 } 562 563 /** 564 * \return true, if stack is empty. 565 * 566 * \note It only guarantees that at some point during the execution of the function the stack has been empty. 567 * It is rarely practical to use this value in program logic, because the stack can be modified by other threads. 568 * */ 569 bool empty(void) const 570 { 571 return pool.get_pointer(tos.load()) == NULL; 572 } 573 574 private: 575 #ifndef BOOST_DOXYGEN_INVOKED 576 detail::atomic<tagged_node_handle> tos; 577 578 static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle); 579 char padding[padding_size]; 580 581 pool_t pool; 582 #endif 583 }; 584 585 } /* namespace lockfree */ 586 } /* namespace boost */ 587 588 #endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */