Use move in WeakRingBuffer where capable. Allow one-sized ring buffers #184

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 1 commit into base: master
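The patch makes two changes. First, the pop( Q*, size_t ), dequeue( Q& ) and pop( Q& ) overloads now require only std::is_move_assignable and move the element out of the ring slot ( dest = std::move(src) ) instead of copy-assigning it. Second, the capacity asserts are relaxed from strict < to <=, which makes a ring buffer of capacity 1 legal: in a one-element ring, back - front equals capacity() after every successful push. A minimal usage sketch of the patched behavior (illustrative only, based on the API exercised by the tests below; not part of the diff):

    #include <cds/container/weak_ringbuffer.h>
    #include <string>

    void demo()
    {
        // Capacity 1 is now accepted (asserts relaxed to <=, and ceil2(1) == 1).
        cds::container::WeakRingBuffer< std::string > ring( 1 );

        ring.enqueue( std::string( "payload" ));  // rvalue is moved into the ring

        std::string out;
        ring.dequeue( out );                      // now moves: out = std::move(src)
    }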
24 changes: 12 additions & 12 deletions cds/container/weak_ringbuffer.h
@@ -196,7 +196,7 @@ namespace cds { namespace container {
         template <typename Q, typename CopyFunc>
         bool push( Q* arr, size_t count, CopyFunc copy )
         {
-            assert( count < capacity());
+            assert( count <= capacity());
             counter_type back = back_.load( memory_model::memory_order_relaxed );

             assert( static_cast<size_t>( back - pfront_ ) <= capacity());
@@ -352,10 +352,10 @@ namespace cds { namespace container {
         template <typename Q, typename CopyFunc>
         bool pop( Q* arr, size_t count, CopyFunc copy )
         {
-            assert( count < capacity());
+            assert( count <= capacity());

             counter_type front = front_.load( memory_model::memory_order_relaxed );
-            assert( static_cast<size_t>( cback_ - front ) < capacity());
+            assert( static_cast<size_t>( cback_ - front ) <= capacity());

             if ( static_cast<size_t>( cback_ - front ) < count ) {
                 cback_ = back_.load( memory_model::memory_order_acquire );
@@ -388,10 +388,10 @@ namespace cds { namespace container {
             Returns \p true if success or \p false if not enough space in the ring
         */
         template <typename Q>
-        typename std::enable_if< std::is_assignable<Q&, value_type const&>::value, bool>::type
+        typename std::enable_if< std::is_move_assignable<Q&>::value, bool>::type
         pop( Q* arr, size_t count )
         {
-            return pop( arr, count, []( Q& dest, value_type& src ) { dest = src; } );
+            return pop( arr, count, []( Q& dest, value_type& src ) { dest = std::move(src); } );
         }

         /// Dequeues an element from the ring to \p val
@@ -402,15 +402,15 @@ namespace cds { namespace container {
             Returns \p false if the ring is full or \p true otherwise.
         */
         template <typename Q>
-        typename std::enable_if< std::is_assignable<Q&, value_type const&>::value, bool>::type
+        typename std::enable_if< std::is_move_assignable<Q&>::value, bool>::type
         dequeue( Q& val )
         {
             return pop( &val, 1 );
         }

         /// Synonym for \p dequeue( Q& )
         template <typename Q>
-        typename std::enable_if< std::is_assignable<Q&, value_type const&>::value, bool>::type
+        typename std::enable_if< std::is_move_assignable<Q&>::value, bool>::type
         pop( Q& val )
         {
             return dequeue( val );
@@ -433,7 +433,7 @@ namespace cds { namespace container {
         bool dequeue_with( Func f )
         {
             counter_type front = front_.load( memory_model::memory_order_relaxed );
-            assert( static_cast<size_t>( cback_ - front ) < capacity());
+            assert( static_cast<size_t>( cback_ - front ) <= capacity());

             if ( cback_ - front < 1 ) {
                 cback_ = back_.load( memory_model::memory_order_acquire );
@@ -466,7 +466,7 @@ namespace cds { namespace container {
         value_type* front()
         {
             counter_type front = front_.load( memory_model::memory_order_relaxed );
-            assert( static_cast<size_t>( cback_ - front ) < capacity());
+            assert( static_cast<size_t>( cback_ - front ) <= capacity());

             if ( cback_ - front < 1 ) {
                 cback_ = back_.load( memory_model::memory_order_acquire );
@@ -687,7 +687,7 @@ namespace cds { namespace container {
             size_t real_size = calc_real_size( size );

             // check if we can reserve real_size bytes
-            assert( real_size < capacity());
+            assert( real_size <= capacity());
             counter_type back = back_.load( memory_model::memory_order_relaxed );

             assert( static_cast<size_t>( back - pfront_ ) <= capacity());
@@ -777,7 +777,7 @@ namespace cds { namespace container {
             uint8_t* reserved = buffer_.buffer() + buffer_.mod( back );

             size_t real_size = calc_real_size( *reinterpret_cast<size_t*>( reserved ));
-            assert( real_size < capacity());
+            assert( real_size <= capacity());

             back_.store( back + real_size, memory_model::memory_order_release );
         }
@@ -805,7 +805,7 @@ namespace cds { namespace container {
         std::pair<void*, size_t> front()
         {
             counter_type front = front_.load( memory_model::memory_order_relaxed );
-            assert( static_cast<size_t>( cback_ - front ) < capacity());
+            assert( static_cast<size_t>( cback_ - front ) <= capacity());

             if ( cback_ - front < sizeof( size_t )) {
                 cback_ = back_.load( memory_model::memory_order_acquire );
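All of the assert changes above follow from one counter invariant: front_ and back_ only grow, and 0 <= back - front <= capacity(). A one-element ring sits at a boundary value after every operation (0 when empty, capacity() when full), so a strict < on those distances would reject valid states. A standalone model of that arithmetic (an illustration, not libcds code):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        std::size_t const capacity = 1;   // one-sized ring
        std::size_t front = 0, back = 0;  // monotonically increasing counters

        back += 1;                            // push: ring is now full
        assert( back - front <= capacity );   // == capacity, old '<' assert would fire

        front += 1;                           // pop: ring is empty again
        assert( back - front <= capacity );   // == 0
        return 0;
    }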
4 changes: 2 additions & 2 deletions cds/opt/buffer.h
@@ -338,7 +338,7 @@ namespace cds { namespace opt {
         uninitialized_dynamic_buffer( size_t nCapacity )
             : m_nCapacity( c_bExp2 ? beans::ceil2(nCapacity) : nCapacity )
         {
-            assert( m_nCapacity >= 2 );
+            assert( m_nCapacity >= 1 );
             // Capacity must be power of 2
             assert( !c_bExp2 || (m_nCapacity & (m_nCapacity - 1)) == 0 );
@@ -464,7 +464,7 @@ namespace cds { namespace opt {
         initialized_dynamic_buffer( size_t nCapacity )
             : m_nCapacity( c_bExp2 ? beans::ceil2(nCapacity) : nCapacity )
         {
-            assert( m_nCapacity >= 2 );
+            assert( m_nCapacity >= 1 );
             // Capacity must be power of 2
             assert( !c_bExp2 || (m_nCapacity & (m_nCapacity - 1)) == 0 );
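The lower bound on the dynamic buffers drops from 2 to 1; the power-of-two assert already admits a capacity of 1, since with c_bExp2 the requested capacity is rounded up with beans::ceil2 and 1 is 2^0. A standalone check of that arithmetic (illustrative, assuming ceil2 rounds up to the nearest power of two):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        std::size_t n = 1;                 // ceil2(1) == 1
        assert( n >= 1 );                  // relaxed lower bound
        assert( (n & (n - 1)) == 0 );      // 1 is a power of two, so the exp2 check holds
        return 0;
    }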
76 changes: 76 additions & 0 deletions test/unit/queue/weak_ringbuffer.cpp
@@ -213,6 +213,26 @@ namespace {
         }
     };

+    TEST_F( WeakRingBuffer, one_sized )
+    {
+        typedef cds::container::WeakRingBuffer< int > test_queue;
+
+        test_queue q( 1 );
+        test( q );
+    }
+
+    TEST_F( WeakRingBuffer, one_sized_static )
+    {
+        struct traits: public cds::container::weak_ringbuffer::traits
+        {
+            typedef cds::opt::v::uninitialized_static_buffer<void*, 1> buffer;
+        };
+        typedef cds::container::WeakRingBuffer< int, traits > test_queue;
+
+        test_queue q;
+        test( q );
+    }
+
     TEST_F( WeakRingBuffer, defaulted )
     {
         typedef cds::container::WeakRingBuffer< int > test_queue;
@@ -222,6 +242,62 @@ namespace {
         test_array( q );
     }

+    struct MoveCopyNode {
+        MoveCopyNode() = default;
+        MoveCopyNode(const MoveCopyNode&) {
+            copy_count++;
+        }
+        MoveCopyNode& operator=(const MoveCopyNode&) {
+            copy_count++;
+            return *this;
+        }
+        MoveCopyNode(MoveCopyNode&&) {
+            move_count++;
+        }
+        MoveCopyNode& operator=(MoveCopyNode&&) {
+            move_count++;
+            return *this;
+        }
+
+        static size_t copy_count;
+        static size_t move_count;
+    };
+
+    size_t MoveCopyNode::copy_count = 0;
+    size_t MoveCopyNode::move_count = 0;
+
+    TEST_F( WeakRingBuffer, move_only )
+    {
+        MoveCopyNode::copy_count = 0;
+        MoveCopyNode::move_count = 0;
+        typedef cds::container::WeakRingBuffer< MoveCopyNode > test_queue;
+
+        test_queue q( 128 );
+        q.enqueue(MoveCopyNode{});
+        ASSERT_EQ(MoveCopyNode::move_count, 1);
+        ASSERT_EQ(MoveCopyNode::copy_count, 0);
+        q.push(MoveCopyNode{});
+        ASSERT_EQ(MoveCopyNode::move_count, 2);
+        ASSERT_EQ(MoveCopyNode::copy_count, 0);
+        MoveCopyNode arr[12]{};
+        q.push(arr, 12);
+        ASSERT_EQ(MoveCopyNode::move_count, 2);
+        ASSERT_EQ(MoveCopyNode::copy_count, 12);
+
+        MoveCopyNode::copy_count = 0;
+        MoveCopyNode::move_count = 0;
+
+        MoveCopyNode node;
+        q.dequeue(node);
+        ASSERT_EQ(MoveCopyNode::move_count, 1);
+        ASSERT_EQ(MoveCopyNode::copy_count, 0);
+        q.pop(node);
+        ASSERT_EQ(MoveCopyNode::move_count, 2);
+        ASSERT_EQ(MoveCopyNode::copy_count, 0);
+
+        q.pop_front(); // does not trigger move or copy
+    }
+
     TEST_F( WeakRingBuffer, stat )
     {
         struct traits: public cds::container::weak_ringbuffer::traits
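A note on the counts the move_only test pins down: the single-element enqueue and push take their argument by rvalue and move it into the ring (move_count 1, then 2), while the bulk push(arr, 12) still copy-assigns because its sources are lvalue array elements (copy_count 12). On the way out, dequeue and pop now move-assign from the ring slot, and pop_front merely advances the front counter without touching any element.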