//--STRIP
#ifndef TIGHT_LOCAL_VECTOR_H
#define TIGHT_LOCAL_VECTOR_H
//--STRIP

/*************************************************************************/
/*  tight_local_vector.h                                                 */
/*  From https://github.com/Relintai/pandemonium_engine (MIT)            */
/*************************************************************************/

//--STRIP
#include "core/pool_vector.h"
#include "core/sort_array.h"
#include "core/vector.h"
#include "core/error_macros.h"
#include "core/memory.h"
//--STRIP

// It grows strictly as much as needed: `reserve()` allocates exactly the requested
// capacity instead of rounding up. (The vanilla LocalVector is what you want in most cases.)
template <class T, class U = uint32_t, bool force_trivial = false>
class TightLocalVector {
private:
	U count = 0;
	U capacity = 0;
	T *data = nullptr;

public:
	T *ptr() {
		return data;
	}

	const T *ptr() const {
		return data;
	}

	_FORCE_INLINE_ void push_back(T p_elem) {
		if (unlikely(count == capacity)) {
			// Grow by doubling to keep amortized push_back cost constant.
			if (capacity == 0) {
				capacity = 1;
			} else {
				capacity <<= 1;
			}
			data = (T *)memrealloc(data, capacity * sizeof(T));
			CRASH_COND_MSG(!data, "Out of memory");
		}

		if (!HAS_TRIVIAL_CONSTRUCTOR(T) && !force_trivial) {
			memnew_placement(&data[count++], T(p_elem));
		} else {
			data[count++] = p_elem;
		}
	}

	void remove(U p_index) {
		ERR_FAIL_UNSIGNED_INDEX(p_index, count);
		count--;
		for (U i = p_index; i < count; i++) {
			data[i] = data[i + 1];
		}
		if (!HAS_TRIVIAL_DESTRUCTOR(T) && !force_trivial) {
			data[count].~T();
		}
	}

	/// Removes the item copying the last value into the position of the one to
	/// remove. It's generally faster than `remove`.
	void remove_unordered(U p_index) {
		ERR_FAIL_INDEX(p_index, count);
		count--;
		if (count > p_index) {
			data[p_index] = data[count];
		}
		if (!HAS_TRIVIAL_DESTRUCTOR(T) && !force_trivial) {
			data[count].~T();
		}
	}

	void erase(const T &p_val) {
		int64_t idx = find(p_val);
		if (idx >= 0) {
			remove(idx);
		}
	}

	// Removes every occurrence of `p_val` (order is not preserved) and returns
	// how many were removed.
	U erase_multiple_unordered(const T &p_val) {
		U from = 0;
		U count = 0;
		while (true) {
			int64_t idx = find(p_val, from);
			if (idx == -1) {
				break;
			}
			remove_unordered(idx);
			from = idx;
			count++;
		}
		return count;
	}

	void invert() {
		for (U i = 0; i < count / 2; i++) {
			SWAP(data[i], data[count - i - 1]);
		}
	}

	_FORCE_INLINE_ void clear() { resize(0); }
	_FORCE_INLINE_ void reset() {
		clear();
		if (data) {
			memfree(data);
			data = nullptr;
			capacity = 0;
		}
	}
	_FORCE_INLINE_ bool empty() const { return count == 0; }
	_FORCE_INLINE_ U get_capacity() const { return capacity; }
	_FORCE_INLINE_ void reserve(U p_size) {
		// Allocates exactly `p_size` elements; never shrinks.
		if (p_size > capacity) {
			capacity = p_size;
			data = (T *)memrealloc(data, capacity * sizeof(T));
			CRASH_COND_MSG(!data, "Out of memory");
		}
	}

	_FORCE_INLINE_ U size() const { return count; }
	void resize(U p_size) {
		if (p_size < count) {
			if (!HAS_TRIVIAL_DESTRUCTOR(T) && !force_trivial) {
				for (U i = p_size; i < count; i++) {
					data[i].~T();
				}
			}
			count = p_size;
		} else if (p_size > count) {
			if (unlikely(p_size > capacity)) {
				if (capacity == 0) {
					capacity = 1;
				}
				while (capacity < p_size) {
					capacity <<= 1;
				}
				data = (T *)memrealloc(data, capacity * sizeof(T));
				CRASH_COND_MSG(!data, "Out of memory");
			}
			if (!HAS_TRIVIAL_CONSTRUCTOR(T) && !force_trivial) {
				for (U i = count; i < p_size; i++) {
					memnew_placement(&data[i], T);
				}
			}
			count = p_size;
		}
	}
	_FORCE_INLINE_ const T &operator[](U p_index) const {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		return data[p_index];
	}
	_FORCE_INLINE_ T &operator[](U p_index) {
		CRASH_BAD_UNSIGNED_INDEX(p_index, count);
		return data[p_index];
	}

	void fill(T p_val) {
		for (U i = 0; i < count; i++) {
			data[i] = p_val;
		}
	}
	void insert(U p_pos, T p_val) {
		ERR_FAIL_UNSIGNED_INDEX(p_pos, count + 1);
		if (p_pos == count) {
			push_back(p_val);
		} else {
			resize(count + 1);
			for (U i = count - 1; i > p_pos; i--) {
				data[i] = data[i - 1];
			}
			data[p_pos] = p_val;
		}
	}

	int64_t find(const T &p_val, U p_from = 0) const {
		for (U i = p_from; i < count; i++) {
			if (data[i] == p_val) {
				return int64_t(i);
			}
		}
		return -1;
	}

	template <class C>
	void sort_custom() {
		U len = count;
		if (len == 0) {
			return;
		}

		SortArray<T, C> sorter;
		sorter.sort(data, len);
	}

	void sort() {
		sort_custom<_DefaultComparator<T>>();
	}

	void ordered_insert(T p_val) {
		U i;
		for (i = 0; i < count; i++) {
			if (p_val < data[i]) {
				break;
			}
		}
		insert(i, p_val);
	}

	operator Vector<T>() const {
		Vector<T> ret;
		ret.resize(size());
		T *w = ret.ptrw();
		memcpy(w, data, sizeof(T) * count);
		return ret;
	}

	operator PoolVector<T>() const {
		PoolVector<T> pl;
		if (size()) {
			pl.resize(size());
			typename PoolVector<T>::Write w = pl.write();
			T *dest = w.ptr();
			memcpy(dest, data, sizeof(T) * count);
		}
		return pl;
	}

	Vector<uint8_t> to_byte_array() const { //useful to pass stuff to gpu or variant
		Vector<uint8_t> ret;
		ret.resize(count * sizeof(T));
		uint8_t *w = ret.ptrw();
		memcpy(w, data, sizeof(T) * count);
		return ret;
	}

	_FORCE_INLINE_ TightLocalVector() {}
	_FORCE_INLINE_ TightLocalVector(const TightLocalVector &p_from) {
		resize(p_from.size());
		for (U i = 0; i < p_from.count; i++) {
			data[i] = p_from.data[i];
		}
	}
	TightLocalVector(const Vector<T> &p_from) {
		resize(p_from.size());
		for (U i = 0; i < count; i++) {
			data[i] = p_from[i];
		}
	}
	TightLocalVector(const PoolVector<T> &p_from) {
		resize(p_from.size());
		typename PoolVector<T>::Read r = p_from.read();
		for (U i = 0; i < count; i++) {
			data[i] = r[i];
		}
	}

	inline void operator=(const TightLocalVector &p_from) {
		resize(p_from.size());
		for (U i = 0; i < p_from.count; i++) {
			data[i] = p_from.data[i];
		}
	}
	inline void operator=(const Vector<T> &p_from) {
		resize(p_from.size());
		for (U i = 0; i < count; i++) {
			data[i] = p_from[i];
		}
	}
	inline TightLocalVector &operator=(const PoolVector<T> &p_from) {
		resize(p_from.size());
		typename PoolVector<T>::Read r = p_from.read();
		for (U i = 0; i < count; i++) {
			data[i] = r[i];
		}
		return *this;
	}

	_FORCE_INLINE_ ~TightLocalVector() {
		if (data) {
			reset();
		}
	}
};

// Integer default version
template <class T, class U = int32_t, bool force_trivial = false>
class TightLocalVectori : public TightLocalVector<T, U, force_trivial> {
};

//--STRIP
#endif // TIGHT_LOCAL_VECTOR_H
//--STRIP
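// A minimal usage sketch, kept in a comment so it does not affect compilation.
// It assumes the engine's `print_line`/`itos` helpers are available and that the
// default uint32_t index type is large enough for the element count:
//
//     TightLocalVector<int> numbers;
//     numbers.reserve(3);          // allocates exactly 3 slots, no rounding up
//     numbers.push_back(1);
//     numbers.push_back(2);
//     numbers.push_back(3);
//     numbers.remove_unordered(0); // moves the last element into slot 0 -> {3, 2}
//     for (uint32_t i = 0; i < numbers.size(); i++) {
//         print_line(itos(numbers[i]));
//     }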