#pragma region CPL License
/*
Nuclex Native Framework
Copyright (C) 2002-2023 Nuclex Development Labs

This library is free software; you can redistribute it and/or
modify it under the terms of the IBM Common Public License as
published by the IBM Corporation; either version 1.0 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the IBM
Common Public License for more details.

You should have received a copy of the IBM Common Public
License along with this library
*/
#pragma endregion // CPL License

#ifndef NUCLEX_SUPPORT_COLLECTIONS_SHIFTQUEUE_H
#define NUCLEX_SUPPORT_COLLECTIONS_SHIFTQUEUE_H

#include "Nuclex/Support/Config.h"
#include "Nuclex/Support/ScopeGuard.h"
#include "Nuclex/Support/BitTricks.h"

#include <cstddef> // for std::size_t
#include <cstdint> // for std::uint8_t
#include <memory> // for std::unique_ptr
#include <cassert> // for assert()
#include <cstring> // for std::memcpy()
#include <type_traits> // for std::enable_if<>
#include <utility> // for std::move()
#include <new> // for placement new

namespace Nuclex { namespace Support { namespace Collections {

  // ------------------------------------------------------------------------------------------- //

  /// <summary>A buffer that acts like a ring buffer but guarantees linear memory</summary>
  /// <remarks>
  ///   <para>
  ///     <strong>Thread safety:</strong> each instance should be accessed by a single thread
  ///   </para>
  ///   <para>
  ///     <strong>Container type:</strong> unbounded linear buffer with batch operations
  ///   </para>
  ///   <para>
  ///     This is a buffer for FIFO batch operations like the ring buffer, but instead of
  ///     wrapping data around, it will keep all data linear. This can be less efficient than
  ///     a ring buffer if there are lots of partial updates, but can also be more efficient
  ///     in cases where the buffer is mostly or completely emptied regularly.
  ///   </para>
  ///   <para>
  ///     It works by naively accumulating data in a buffer. Reads advance a read pointer,
  ///     leaving unused memory behind. When writing to the buffer, if more space is wasted
  ///     than there is data in the buffer, all data is shifted back to the front of the
  ///     buffer. This is a fairly good heuristic so long as your reads typically consume
  ///     most of the buffer.
  ///   </para>
  ///   <para>
  ///     In contrast to a ring buffer, this buffer also allows you to obtain a pointer to
  ///     the data it holds, allowing for extra efficiency if the data can be processed
  ///     directly from a buffer. You can also obtain a pointer to write into the buffer.
  ///   </para>
  ///   <para>
  ///     This class offers the basic exception guarantee: if your items throw in their
  ///     copy or move constructors, the shift queue will remain in a usable state and
  ///     not leak memory, but operations may end up applied partially, i.e. a read may
  ///     fail and return nothing, yet kill half your buffer contents.
  ///   </para>
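  ///   <para>
  ///     A minimal usage sketch (illustrative only; it uses nothing but the methods
  ///     declared below):
  ///   </para>
  ///   <code>
  ///     ShiftQueue<int> queue(64);
  ///
  ///     int incoming[3] = { 1, 2, 3 };
  ///     queue.Write(incoming, 3); // copies three items into the queue
  ///
  ///     const int *oldest = queue.Access(); // direct pointer to the oldest item
  ///     queue.Skip(1); // discards the oldest item
  ///
  ///     int received[2];
  ///     queue.Read(received, 2); // moves the remaining two items out
  ///   </code>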
  /// </remarks>
  template<typename TItem>
  class ShiftQueue {

    /// <summary>Initializes a new shift queue</summary>
    /// <param name="capacity">Storage space in the shift queue at the beginning</param>
    public: explicit ShiftQueue(std::size_t capacity = 256) :
      itemMemory(
        new std::uint8_t[sizeof(TItem[2]) * BitTricks::GetUpperPowerOfTwo(capacity) / 2]
      ),
      capacity(BitTricks::GetUpperPowerOfTwo(capacity)),
      startIndex(0),
      endIndex(0) {}

    /// <summary>Initializes a shift queue as a copy of another shift queue</summary>
    /// <param name="other">Other shift queue that will be copied</param>
    public: ShiftQueue(const ShiftQueue &other) :
      itemMemory(new std::uint8_t[sizeof(TItem[2]) * other.capacity / 2]),
      capacity(other.capacity),
      startIndex(0),
      endIndex(0) {
      const TItem *sourceItems = (
        reinterpret_cast<const TItem *>(other.itemMemory.get()) + other.startIndex
      );
      emplaceItems(sourceItems, other.endIndex - other.startIndex);
    }

    /// <summary>Initializes a shift queue taking over another shift queue</summary>
    /// <param name="other">Other shift queue that will be taken over</param>
    public: ShiftQueue(ShiftQueue &&other) :
      itemMemory(std::move(other.itemMemory)),
      capacity(other.capacity),
      startIndex(other.startIndex),
      endIndex(other.endIndex) {
      other.startIndex = other.endIndex = 0; // Ensure other doesn't try to destroy items
    }

    /// <summary>Destroys the shift queue and all items in it</summary>
    public: ~ShiftQueue() {
      if(this->startIndex != this->endIndex) {
        TItem *items = reinterpret_cast<TItem *>(this->itemMemory.get()) + this->startIndex;
        for(std::size_t index = this->startIndex; index < this->endIndex; ++index) {
          items->~TItem();
          ++items;
        }
      }
    }

    /// <summary>Returns the number of items the shift queue has allocated memory for</summary>
    /// <returns>The number of items the shift queue has reserved space for</returns>
    /// <remarks>
    ///   Just like std::vector::capacity(), this is not a limit. If the capacity is
    ///   exceeded, the shift queue will allocate a larger memory block and use that one.
    /// </remarks>
    public: std::size_t GetCapacity() const { return this->capacity; }

    /// <summary>Counts the number of items currently stored in the shift queue</summary>
    public: std::size_t Count() const { return this->endIndex - this->startIndex; }

    /// <summary>Provides direct read access to the items stored in the buffer</summary>
    /// <returns>
    ///   A pointer to the oldest item in the buffer, followed sequentially by
    ///   all newer items in the order they were written
    /// </returns>
    public: const TItem *Access() const {
      return reinterpret_cast<const TItem *>(this->itemMemory.get()) + this->startIndex;
    }

    /// <summary>Skips the specified number of items</summary>
    /// <param name="skipItemCount">Number of items that will be skipped</param>
    public: void Skip(std::size_t skipItemCount) {
      assert(
        ((this->startIndex + skipItemCount) <= this->endIndex) &&
        u8"Amount of data skipped must be less or equal to the amount of data in the buffer"
      );
      skipItems(skipItemCount);
    }

    /// <summary>Reads items out of the buffer, starting with the oldest item</summary>
    /// <param name="items">Memory to which the items will be moved</param>
    /// <param name="count">Number of items that will be read from the buffer</param>
    public: void Read(TItem *items, std::size_t count) {
      assert(
        ((this->startIndex + count) <= this->endIndex) &&
        u8"Amount of data read must be less or equal to the amount of data in the buffer"
      );
      extractItems(items, count);
    }

    /// <summary>Copies the specified number of items into the shift queue</summary>
    /// <param name="items">Items that will be copied into the shift queue</param>
    /// <param name="count">Number of items that will be copied</param>
    public: void Write(const TItem *items, std::size_t count) {
      makeSpace(count);
      emplaceItems(items, count);
    }

    /// <summary>Moves the specified number of items into the shift queue</summary>
    /// <param name="items">Items that will be moved into the shift queue</param>
    /// <param name="count">Number of items that will be moved</param>
    public: void Shove(TItem *items, std::size_t count) {
      makeSpace(count);
      moveEmplaceItems(items, count);
    }
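    // Usage note (illustrative sketch, with 'Message' as a hypothetical movable type):
    // Write() copies items in, Shove() moves them in.
    //
    //   ShiftQueue<Message> queue;
    //   std::vector<Message> pending = loadMessages(); // hypothetical source
    //   queue.Write(pending.data(), pending.size());   // copy: 'pending' stays intact
    //   // ...or...
    //   queue.Shove(pending.data(), pending.size());   // move: items become moved-from,
    //                                                  // but the caller still owns and
    //                                                  // destroys them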
    #if 1 // Cool, efficient and an invitation to shoot yourself in the foot

    /// <summary>
    ///   Promises the shift queue to write the specified number of items before
    ///   the next call to any non-const method
    /// </summary>
    /// <param name="itemCount">Number of items to promise the shift queue</param>
    /// <returns>The address at which the items must be written</returns>
    /// <remarks>
    ///   <para>
    ///     Warning! The returned pointer is to uninitialized memory. That means assigning
    ///     the items is an error, they must be constructed into their addresses via
    ///     placement new, not assignment! (unless they're std::is_trivially_copyable,
    ///     in which case, std::memcpy() away)
    ///   </para>
    ///   <para>
    ///     Additional Warning! After calling this method, the shift queue will attempt
    ///     to destroy the promised items if it is itself destroyed or needs to shuffle
    ///     items around. If you do not fill the promised items (or are interrupted by
    ///     an item constructor throwing an exception), you have to take care to call
    ///     <see cref="Unpromise" /> to revert your promise in all cases!
    ///   </para>
    /// </remarks>
    protected: TItem *Promise(std::size_t itemCount) {
      makeSpace(itemCount);
      TItem *items = reinterpret_cast<TItem *>(this->itemMemory.get()) + this->endIndex;
      this->endIndex += itemCount;
      return items;
    }

    /// <summary>Reverses a promise of data given via <see cref="Promise" /></summary>
    /// <param name="itemCount">Number of items for which to reverse the promise</param>
    /// <remarks>
    ///   Warning! You must not reverse a promise for more data than you promised with
    ///   your last call to <see cref="Promise" />. The items for which you reverse your
    ///   promise will be considered uninitialized memory again and will not have their
    ///   destructors called.
    /// </remarks>
    protected: void Unpromise(std::size_t itemCount) {
      this->endIndex -= itemCount;
      assert(
        (this->endIndex >= this->startIndex) &&
        u8"Promise reversal can not deny more data than was promised earlier"
      );
    }

    #endif
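    // A hedged sketch of the Promise()/Unpromise() contract documented above, as it
    // could be used from a derived class (the 'producer' callable is hypothetical):
    //
    //   TItem *target = this->Promise(batchCount);
    //   std::size_t constructed = 0;
    //   try {
    //     for(; constructed < batchCount; ++constructed) {
    //       new(target + constructed) TItem(producer()); // placement new, not assignment
    //     }
    //   }
    //   catch(...) {
    //     this->Unpromise(batchCount - constructed); // revert only the unfilled part
    //     throw;
    //   }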
    /// <summary>Ensures that space is available for the specified number of items</summary>
    /// <param name="itemCount">Number of items for which space will be made available</param>
    /// <remarks>
    ///   When this method finishes, there will be enough space between
    ///   <see cref="endIndex" /> and <see cref="capacity" /> to fit the requested number
    ///   of items. If there was enough space in the first place, this method does nothing.
    /// </remarks>
    private: void makeSpace(std::size_t itemCount) {
      std::size_t usedItemCount = this->endIndex - this->startIndex;

      // Is more space in the buffer inaccessible than is occupied by items?
      if(this->startIndex > usedItemCount) {

        std::size_t totalItemCount = usedItemCount + itemCount;
        if(likely(this->capacity >= totalItemCount)) {
          // No buffer resize needed, just shift the items back to the front
          TItem *items = reinterpret_cast<TItem *>(this->itemMemory.get()) + this->startIndex;
          shiftItems(items, usedItemCount);
        } else {
          // If the buffer needs to be resized anyway, we don't need to shift back
          // first and can do the resize + shift in one operation
          this->capacity = BitTricks::GetUpperPowerOfTwo(this->startIndex + totalItemCount);
          {
            std::unique_ptr<std::uint8_t[]> newItemMemory(
              new std::uint8_t[sizeof(TItem[2]) * this->capacity / 2]
            );
            newItemMemory.swap(this->itemMemory);

            TItem *items = reinterpret_cast<TItem *>(newItemMemory.get()) + this->startIndex;
            shiftItems(items, usedItemCount);
          }
        }

      } else { // The inaccessible space in the buffer is less than the used space

        // If the space at the end of the buffer is too small, allocate a new buffer
        // two times the required size. This ensures that the buffer will settle into
        // a read-shift-fill cycle without resizes if the current usage pattern repeats.
        std::size_t freeItemCount = this->capacity - this->endIndex;
        if(likely(freeItemCount >= itemCount)) {
          // Enough space available, no action needed
        } else {
          this->capacity = BitTricks::GetUpperPowerOfTwo((usedItemCount + itemCount) * 2);
          {
            std::unique_ptr<std::uint8_t[]> newItemMemory(
              new std::uint8_t[sizeof(TItem[2]) * this->capacity / 2]
            );
            newItemMemory.swap(this->itemMemory);

            TItem *items = reinterpret_cast<TItem *>(newItemMemory.get()) + this->startIndex;
            shiftItems(items, usedItemCount);
          }
        }

      }
    }

    /// <summary>Copies the specified items into the already available buffer</summary>
    /// <param name="sourceItems">Items that will be copied into the buffer</param>
    /// <param name="itemCount">Number of items that will be copied</param>
    private: template<typename T = TItem>
    typename std::enable_if<!std::is_trivially_copyable<T>::value>::type emplaceItems(
      const TItem *sourceItems, std::size_t itemCount
    ) {
      TItem *targetItems = reinterpret_cast<TItem *>(this->itemMemory.get());
      targetItems += this->endIndex;

      std::size_t count = itemCount;
      {
        ON_SCOPE_EXIT { this->endIndex += itemCount - count; };
        while(count > 0) {
          new(targetItems) TItem(*sourceItems);
          ++sourceItems;
          ++targetItems;
          --count;
        }
      }
    }

    /// <summary>Copies the specified items into the already available buffer</summary>
    /// <param name="sourceItems">Items that will be copied into the buffer</param>
    /// <param name="itemCount">Number of items that will be copied</param>
    private: template<typename T = TItem>
    typename std::enable_if<std::is_trivially_copyable<T>::value>::type emplaceItems(
      const TItem *sourceItems, std::size_t itemCount
    ) {
      TItem *targetItems = reinterpret_cast<TItem *>(this->itemMemory.get());
      targetItems += this->endIndex;

      std::memcpy(targetItems, sourceItems, itemCount * sizeof(TItem));
      this->endIndex += itemCount;
    }

    /// <summary>Moves the specified items into the already available buffer</summary>
    /// <param name="sourceItems">Items that will be moved into the buffer</param>
    /// <param name="itemCount">Number of items that will be moved</param>
    private: template<typename T = TItem>
    typename std::enable_if<!std::is_trivially_copyable<T>::value>::type moveEmplaceItems(
      TItem *sourceItems, std::size_t itemCount
    ) {
      TItem *targetItems = reinterpret_cast<TItem *>(this->itemMemory.get());
      targetItems += this->endIndex;

      std::size_t count = itemCount;
      {
        ON_SCOPE_EXIT { this->endIndex += itemCount - count; };
        while(count > 0) {
          new(targetItems) TItem(std::move(*sourceItems));
          // no d'tor call here, source isn't ours and will be destroyed externally
          ++sourceItems;
          ++targetItems;
          --count;
        }
      }
    }

    /// <summary>Moves the specified items into the already available buffer</summary>
    /// <param name="sourceItems">Items that will be moved into the buffer</param>
    /// <param name="itemCount">Number of items that will be moved</param>
    private: template<typename T = TItem>
    typename std::enable_if<std::is_trivially_copyable<T>::value>::type moveEmplaceItems(
      TItem *sourceItems, std::size_t itemCount
    ) {
      TItem *targetItems = reinterpret_cast<TItem *>(this->itemMemory.get());
      targetItems += this->endIndex;

      std::memcpy(targetItems, sourceItems, itemCount * sizeof(TItem));
      this->endIndex += itemCount;
    }

    /// <summary>Takes the specified number of items out of the buffer</summary>
    /// <param name="targetItems">Address at which the items will be placed</param>
    /// <param name="itemCount">Number of items that will be extracted</param>
    private: template<typename T = TItem>
    typename std::enable_if<
      !std::is_trivially_copyable<T>::value || !std::is_trivially_destructible<T>::value
    >::type extractItems(TItem *targetItems, std::size_t itemCount) {
      TItem *sourceItems = (
        reinterpret_cast<TItem *>(this->itemMemory.get()) + this->startIndex
      );

      std::size_t count = itemCount;
      try {
        while(count > 0) {
          *targetItems = std::move(*sourceItems);
          sourceItems->~TItem();
          ++targetItems;
          ++sourceItems;
          --count;
        }
        this->startIndex += itemCount;
      }
      catch(...) {
        this->startIndex += (itemCount - count);
        throw;
      }
    }
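    // Dispatch note (illustrative): the std::enable_if overloads above and below select
    // an implementation per item type at compile time, for example:
    //
    //   ShiftQueue<std::uint32_t> raw;  // trivially copyable -> std::memcpy() paths
    //   ShiftQueue<std::string> text;   // element-wise paths with exception handling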
    /// <summary>Takes the specified number of items out of the buffer</summary>
    /// <param name="targetItems">Address at which the items will be placed</param>
    /// <param name="itemCount">Number of items that will be extracted</param>
    private: template<typename T = TItem>
    typename std::enable_if<
      std::is_trivially_copyable<T>::value && std::is_trivially_destructible<T>::value
    >::type extractItems(TItem *targetItems, std::size_t itemCount) {
      TItem *sourceItems = (
        reinterpret_cast<TItem *>(this->itemMemory.get()) + this->startIndex
      );
      std::memcpy(targetItems, sourceItems, itemCount * sizeof(TItem));
      this->startIndex += itemCount;
    }

    /// <summary>Skips the specified number of items in the buffer</summary>
    /// <param name="itemCount">Number of items that will be skipped</param>
    private: template<typename T = TItem>
    typename std::enable_if<!std::is_trivially_destructible<T>::value>::type skipItems(
      std::size_t itemCount
    ) {
      TItem *sourceItems = (
        reinterpret_cast<TItem *>(this->itemMemory.get()) + this->startIndex
      );

      std::size_t count = itemCount;
      while(count > 0) {
        sourceItems->~TItem();
        ++sourceItems;
        --count;
      }

      this->startIndex += itemCount;
    }

    /// <summary>Skips the specified number of items in the buffer</summary>
    /// <param name="itemCount">Number of items that will be skipped</param>
    /// <remarks>
    ///   Storage occupied by trivially destructible objects may be reused without
    ///   calling the destructor - so we don't.
    /// </remarks>
    private: template<typename T = TItem>
    typename std::enable_if<std::is_trivially_destructible<T>::value>::type skipItems(
      std::size_t itemCount
    ) {
      this->startIndex += itemCount;
    }

    /// <summary>Moves the items from another location into the buffer</summary>
    /// <param name="sourceItems">Items that will be moved into the buffer</param>
    /// <param name="itemCount">Number of items that will be moved</param>
    /// <remarks>
    ///   <para>
    ///     The source buffer can be the ShiftQueue's own memory so long as there is
    ///     no overlap between the items to be moved and the target memory range.
    ///   </para>
    ///   <para>
    ///     In all cases, the target is this buffer's own memory, starting at index 0.
    ///     The start and end index of the instance will be updated accordingly.
    ///   </para>
    ///   <para>
    ///     Guarantees that all items in the source buffer up to the specified item count
    ///     are either moved or destroyed after the call.
    ///   </para>
    /// </remarks>
    private: template<typename T = TItem>
    typename std::enable_if<
      !std::is_trivially_copyable<T>::value || !std::is_trivially_destructible<T>::value
    >::type shiftItems(TItem *sourceItems, std::size_t itemCount) {
      TItem *targetItems = reinterpret_cast<TItem *>(this->itemMemory.get());
      this->startIndex = 0;

      std::size_t count = itemCount;
      {
        auto cleanUp = ON_SCOPE_EXIT_TRANSACTION {
          this->endIndex = itemCount - count;

          // Move failed, kill all items that remain in the source buffer. Moving
          // the rest would result in skipping an item in the buffer and risking
          // another exception. We can't deal with segmented buffers either.
          while(count > 0) {
            sourceItems->~TItem();
            ++sourceItems;
            --count;
          }
        };

        // Move all items from their old location to their new location
        while(count > 0) {
          new(targetItems) TItem(std::move(*sourceItems));
          sourceItems->~TItem();
          ++sourceItems;
          ++targetItems;
          --count;
        }

        cleanUp.Commit();
      }

      // Move succeeded, the new end index is the number of items we have moved
      this->endIndex = itemCount;
    }
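    // Worked example of the shift-back heuristic implemented above ('data' is a
    // hypothetical source buffer; indices follow makeSpace() and shiftItems()):
    //
    //   ShiftQueue<std::uint8_t> q(100); // capacity rounds up to 128
    //   q.Write(data, 100);              // startIndex = 0, endIndex = 100
    //   q.Skip(90);                      // startIndex = 90, 10 items remain
    //   q.Write(data, 20);               // 90 > 10: the 10 items are shifted to the
    //                                    // front, then 20 appended; endIndex = 30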
    /// <summary>Moves the items from another location into the buffer</summary>
    /// <param name="sourceItems">Items that will be moved into the buffer</param>
    /// <param name="itemCount">Number of items that will be moved</param>
    /// <remarks>
    ///   Variant for trivially copyable items where we don't have to worry about
    ///   individual items throwing inside the move constructor.
    /// </remarks>
    private: template<typename T = TItem>
    typename std::enable_if<
      std::is_trivially_copyable<T>::value && std::is_trivially_destructible<T>::value
    >::type shiftItems(TItem *sourceItems, std::size_t itemCount) {
      // std::copy_n() would use std::memmove(), but we know there is no overlap, so:
      std::memcpy(this->itemMemory.get(), sourceItems, itemCount * sizeof(TItem));
      this->startIndex = 0;
      this->endIndex = itemCount;
    }

    /// <summary>Holds the items stored in the shift queue</summary>
    private: std::unique_ptr<std::uint8_t[]> itemMemory;
    /// <summary>Number of items the shift queue can currently hold</summary>
    private: std::size_t capacity;
    /// <summary>Index of the first item in the shift queue</summary>
    private: std::size_t startIndex;
    /// <summary>Index one past the last item</summary>
    private: std::size_t endIndex;

  };

  // ------------------------------------------------------------------------------------------- //

}}} // namespace Nuclex::Support::Collections

#endif // NUCLEX_SUPPORT_COLLECTIONS_SHIFTQUEUE_H