include/boost/lockfree/detail/atomic.hpp: 3 additions & 0 deletions

@@ -18,12 +18,14 @@ namespace detail {
 
 #if defined( BOOST_LOCKFREE_FORCE_BOOST_ATOMIC )
 using boost::atomic;
+using boost::atomic_thread_fence;
 using boost::memory_order_acquire;
 using boost::memory_order_consume;
 using boost::memory_order_relaxed;
 using boost::memory_order_release;
 #else
 using std::atomic;
+using std::atomic_thread_fence;
 using std::memory_order_acquire;
 using std::memory_order_consume;
 using std::memory_order_relaxed;
@@ -32,6 +34,7 @@ using std::memory_order_release;
 
 } // namespace detail
 using detail::atomic;
+using detail::atomic_thread_fence;
 using detail::memory_order_acquire;
 using detail::memory_order_consume;
 using detail::memory_order_relaxed;
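This shim now re-exports `atomic_thread_fence` alongside `atomic` and the `memory_order` constants, so the rest of the library can issue fences without caring whether Boost.Atomic or `<atomic>` is the backend. A minimal sketch of how downstream code can use the alias; `publish` is a hypothetical helper, not part of the library:

```cpp
#include <boost/lockfree/detail/atomic.hpp>

namespace boost { namespace lockfree {

// Hypothetical: publish a fully initialized node through the detail shim.
// Depending on BOOST_LOCKFREE_FORCE_BOOST_ATOMIC, these names resolve to
// boost::atomic/atomic_thread_fence or their std:: equivalents.
template < typename T >
void publish( detail::atomic< T* >& slot, T* node )
{
    // Writes to *node made before this fence cannot be reordered past
    // the store below, so a reader that acquires the pointer sees them.
    detail::atomic_thread_fence( detail::memory_order_release );
    slot.store( node, detail::memory_order_relaxed );
}

}} // namespace boost::lockfree
```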
include/boost/lockfree/detail/freelist.hpp: 12 additions & 6 deletions

@@ -183,7 +183,7 @@ class alignas( cacheline_bytes ) freelist_stack : Alloc
     template < bool Bounded >
     T* allocate_impl( void )
     {
-        tagged_node_ptr old_pool = pool_.load( memory_order_consume );
+        tagged_node_ptr old_pool = pool_.load( memory_order_acquire );
 
         for ( ;; ) {
             if ( !old_pool.get_ptr() ) {
@@ -241,13 +241,16 @@ class alignas( cacheline_bytes ) freelist_stack : Alloc
     void deallocate_impl( T* n )
     {
         void* node = n;
-        tagged_node_ptr old_pool = pool_.load( memory_order_consume );
+        tagged_node_ptr old_pool = pool_.load( memory_order_acquire );
         freelist_node* new_pool_ptr = reinterpret_cast< freelist_node* >( node );
 
         for ( ;; ) {
             tagged_node_ptr new_pool( new_pool_ptr, old_pool.get_tag() );
             new_pool->next.set_ptr( old_pool.get_ptr() );
 
+            // Ensure the write to next is visible before the CAS makes the node visible
+            atomic_thread_fence( memory_order_release );
+
             if ( pool_.compare_exchange_weak( old_pool, new_pool ) )
                 return;
         }
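The two halves of this change belong together. On the producer side, the node's `next` pointer is written before the CAS that links the node into the pool, and the new release fence orders that write ahead of the CAS. On the consumer side, the loads switch from `memory_order_consume` to `memory_order_acquire`, since acquire is what pairs with a release fence (and compilers promote consume to acquire in practice anyway). This is the classic Treiber-stack publication protocol. A self-contained sketch of the same pattern, with illustrative names (`Node`, `head`, `push`) and without the tag-based ABA protection the real freelist uses:

```cpp
#include <atomic>

struct Node {
    int value;
    Node* next;
};

std::atomic< Node* > head{ nullptr };

void push( Node* n )
{
    Node* old_head = head.load( std::memory_order_acquire );
    for ( ;; ) {
        n->next = old_head; // plain write; must be visible before n is published

        // Order the write to n->next before the CAS below. An acquire load
        // that observes the new head is then guaranteed to see n->next too.
        std::atomic_thread_fence( std::memory_order_release );

        if ( head.compare_exchange_weak( old_head, n ) )
            return;
        // CAS failure refreshes old_head with the current head; retry.
    }
}
```

Note that the library's CAS keeps its default (sequentially consistent) ordering; the explicit fence simply makes the required release semantics independent of that default.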
@@ -561,7 +564,7 @@ class fixed_size_freelist : NodeStorage
 private:
     index_t allocate_impl( void )
     {
-        tagged_index old_pool = pool_.load( memory_order_consume );
+        tagged_index old_pool = pool_.load( memory_order_acquire );
 
         for ( ;; ) {
             index_t index = old_pool.get_index();
@@ -580,7 +583,7 @@
 
     index_t allocate_impl_unsafe( void )
     {
-        tagged_index old_pool = pool_.load( memory_order_consume );
+        tagged_index old_pool = pool_.load( memory_order_acquire );
 
         index_t index = old_pool.get_index();
         if ( index == null_handle() )
@@ -607,12 +610,15 @@
     void deallocate_impl( index_t index )
     {
         freelist_node* new_pool_node = reinterpret_cast< freelist_node* >( NodeStorage::nodes() + index );
-        tagged_index old_pool = pool_.load( memory_order_consume );
+        tagged_index old_pool = pool_.load( memory_order_acquire );
 
         for ( ;; ) {
             tagged_index new_pool( index, old_pool.get_tag() );
             new_pool_node->next.set_index( old_pool.get_index() );
 
+            // Ensure the write to next is visible before the CAS makes the node visible
+            atomic_thread_fence( memory_order_release );
+
             if ( pool_.compare_exchange_weak( old_pool, new_pool ) )
                 return;
         }
@@ -621,7 +627,7 @@
     void deallocate_impl_unsafe( index_t index )
     {
         freelist_node* new_pool_node = reinterpret_cast< freelist_node* >( NodeStorage::nodes() + index );
-        tagged_index old_pool = pool_.load( memory_order_consume );
+        tagged_index old_pool = pool_.load( memory_order_acquire );
 
         tagged_index new_pool( index, old_pool.get_tag() );
         new_pool_node->next.set_index( old_pool.get_index() );
include/boost/lockfree/stack.hpp: 12 additions & 5 deletions

@@ -82,7 +82,12 @@ class stack
     static const bool node_based = !( has_capacity || fixed_sized );
     static const bool compile_time_sized = has_capacity;
 
-    struct node
+    struct node;
+    typedef typename detail::select_tagged_handle< node, node_based >::tagged_handle_type tagged_node_handle;
+
+    struct alignas( node_based
+                        ? ( alignof( tagged_node_handle ) > alignof( T ) ? alignof( tagged_node_handle ) : alignof( T ) )
+                        : 0 ) node
     {
         node( const T& val ) :
             v( val )
@@ -101,7 +106,6 @@ class stack
     typedef typename detail::extract_allocator< bound_args, node >::type node_allocator;
    typedef
         typename detail::select_freelist< node, node_allocator, compile_time_sized, fixed_sized, capacity >::type pool_t;
-    typedef typename pool_t::tagged_node_handle tagged_node_handle;
 
     // check compile-time capacity
     static constexpr bool capacity_is_valid = has_capacity ? capacity - 1 < std::numeric_limits< std::uint16_t >::max()
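Forward-declaring `node` lets `tagged_node_handle` be defined before the struct body, so the `alignas` specifier can request the stricter of `alignof( tagged_node_handle )` and `alignof( T )` in the node-based configuration, and fall back to `alignas( 0 )`, which the language ignores, otherwise. A compressed illustration of the same trick, with `tagged_handle`, `T`, and `node_based` as hypothetical stand-ins for the diff's types:

```cpp
#include <cstdint>

// Hypothetical stand-ins for the types selected in the diff.
struct tagged_handle {
    void* ptr;
    std::uint16_t tag;
};
using T = char;
constexpr bool node_based = true;

// Pick the stricter alignment; alignas(0) has no effect, so the whole
// specifier is a no-op when node_based is false.
struct alignas( node_based
                    ? ( alignof( tagged_handle ) > alignof( T ) ? alignof( tagged_handle ) : alignof( T ) )
                    : 0 ) node {
    T v;
    tagged_handle next;
};

static_assert( alignof( node ) >= alignof( tagged_handle ),
               "node storage must be suitably aligned for its handle" );
```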
@@ -266,6 +270,9 @@
             tagged_node_handle new_tos( pool.get_handle( new_top_node ), old_tos.get_tag() );
             end_node->next = pool.get_handle( old_tos );
 
+            // Ensure the write to end_node->next is visible before the CAS makes the node visible
+            detail::atomic_thread_fence( detail::memory_order_release );
+
             if ( tos.compare_exchange_weak( old_tos, new_tos ) )
                 break;
         }
@@ -642,7 +649,7 @@
     template < typename Functor >
     bool consume_one( Functor&& f )
     {
-        tagged_node_handle old_tos = tos.load( detail::memory_order_consume );
+        tagged_node_handle old_tos = tos.load( detail::memory_order_acquire );
 
         for ( ;; ) {
             node* old_tos_pointer = pool.get_pointer( old_tos );
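On the consumer side, every load of `tos` that previously used `memory_order_consume` now uses `memory_order_acquire`. The consuming paths dereference the loaded top node (reading `v` and `next`), so they must observe the writes the pusher published; consume only covers data-dependent accesses and is implemented as acquire by mainstream compilers anyway. A pop-side sketch matching the push example earlier, again with illustrative names and no ABA tag:

```cpp
#include <atomic>
#include <optional>

struct Node {
    int value;
    Node* next;
};

std::atomic< Node* > head{ nullptr };

std::optional< int > pop()
{
    // Acquire pairs with the pusher's release fence: once we observe a
    // node, we also observe its value and next fields.
    Node* old_head = head.load( std::memory_order_acquire );
    for ( ;; ) {
        if ( !old_head )
            return std::nullopt; // stack is empty

        Node* next = old_head->next; // ordered after the acquire load

        if ( head.compare_exchange_weak( old_head, next ) )
            return old_head->value;
        // on failure, old_head holds the fresh head; retry
    }
}
```

In the library itself the dereference of the popped node is safe because nodes are recycled through the freelist rather than returned to the allocator while other threads may still hold pointers to them.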
@@ -689,7 +696,7 @@
     size_t consume_all_atomic( Functor&& f )
     {
         size_t element_count = 0;
-        tagged_node_handle old_tos = tos.load( detail::memory_order_consume );
+        tagged_node_handle old_tos = tos.load( detail::memory_order_acquire );
 
         for ( ;; ) {
             node* old_tos_pointer = pool.get_pointer( old_tos );
@@ -736,7 +743,7 @@
     size_t consume_all_atomic_reversed( Functor&& f )
     {
         size_t element_count = 0;
-        tagged_node_handle old_tos = tos.load( detail::memory_order_consume );
+        tagged_node_handle old_tos = tos.load( detail::memory_order_acquire );
 
         for ( ;; ) {
             node* old_tos_pointer = pool.get_pointer( old_tos );