[beast: 28/57] SFI: provide AlignedArray and related functions



commit ac2c6d57d8c85c76d4a9b9986a5421f862fac901
Author: Tim Janik <timj gnu org>
Date:   Sun Jul 16 20:39:22 2017 +0200

    SFI: provide AlignedArray and related functions
    
    Signed-off-by: Tim Janik <timj gnu org>

 sfi/bcore.cc |   54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 sfi/bcore.hh |   46 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 100 insertions(+), 0 deletions(-)
---
diff --git a/sfi/bcore.cc b/sfi/bcore.cc
index 0c1ff94..52fdc10 100644
--- a/sfi/bcore.cc
+++ b/sfi/bcore.cc
@@ -32,6 +32,60 @@ static_assert (DBL_EPSILON  <= 1E-9, "");
 
 namespace Bse {
 
+// == Memory Utilities ==
+/**
+ * The fmsb() function returns the position of the most significant bit set in the word @a val.
+ * The least significant bit is position 1; for a uint64, the most significant position is 64.
+ * @returns The position of the most significant bit set, or 0 if no bits were set.
+ */
+int // 0 or 1..64
+fmsb (uint64 val)
+{
+  if (val >> 32)
+    return 32 + fmsb (val >> 32);
+  int nb = 32;
+  do
+    {
+      nb--;
+      if (val & (1U << nb))
+        return nb + 1;  /* 1..32 */
+    }
+  while (nb > 0);
+  return 0; /* none found */
+}
+
+/// Allocate a block of memory aligned to at least @a alignment bytes; @a free_pointer receives the start of the allocation, to be released via aligned_free().
+void*
+aligned_alloc (size_t total_size, size_t alignment, uint8 **free_pointer)
+{
+  assert_return (free_pointer != NULL, NULL);
+  uint8 *aligned_mem = new uint8[total_size];
+  *free_pointer = aligned_mem;
+  if (aligned_mem && (!alignment || 0 == size_t (aligned_mem) % alignment))
+    return aligned_mem;
+  if (aligned_mem)
+    delete[] aligned_mem;
+  aligned_mem = new uint8[total_size + alignment - 1];
+  assert_return (aligned_mem != NULL, NULL);
+  *free_pointer = aligned_mem;
+  if (size_t (aligned_mem) % alignment)
+    aligned_mem += alignment - size_t (aligned_mem) % alignment;
+  return aligned_mem;
+}
+
+/// Release a block of memory allocated through aligned_alloc().
+void
+aligned_free (uint8 **free_pointer)
+{
+  assert_return (free_pointer != NULL);
+  if (*free_pointer)
+    {
+      uint8 *data = *free_pointer;
+      *free_pointer = NULL;
+      delete[] data;
+    }
+}
+
 // == Internal ==
 namespace Internal {
 
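For illustration, a minimal usage sketch of the new helpers (hypothetical caller
code, not part of this commit; the byte sizes and the 64 byte alignment are made
up for the example):

    #include "bcore.hh"   // assumed header providing fmsb(), aligned_alloc(), aligned_free()

    using namespace Bse;

    static void
    memory_utils_example ()
    {
      // fmsb() uses 1-based positions: fmsb (1) == 1, fmsb (0x10) == 5, fmsb (0) == 0
      const int pos = fmsb (0x10);
      // request 256 bytes aligned to a 64 byte cache line; free_pointer
      // receives the unaligned block needed later for deallocation
      uint8 *free_pointer = NULL;
      float *block = (float*) aligned_alloc (256, 64, &free_pointer);
      block[0] = pos;               // use the aligned memory
      aligned_free (&free_pointer); // releases the block, resets free_pointer to NULL
    }

Note the design choice: aligned_alloc() only over-allocates by alignment - 1
bytes (and retries) when the first plain allocation happens to be misaligned,
so the common case costs a single allocation with no slack.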
diff --git a/sfi/bcore.hh b/sfi/bcore.hh
index a7e9105..c551231 100644
--- a/sfi/bcore.hh
+++ b/sfi/bcore.hh
@@ -234,6 +234,52 @@ info (const char *format, const Args &...args)
 #define return_unless(cond, ...)        BSE_RETURN_UNLESS (cond, __VA_ARGS__)
 #endif // BSE_CONVENIENCE
 
+// == Memory Utilities ==
+int     fmsb          (uint64  word) BSE_CONST; ///< Find most significant bit set in a word.
+void*   aligned_alloc (size_t  total_size, size_t alignment, uint8 **free_pointer);
+void    aligned_free  (uint8 **free_pointer);
+
+/// Class to maintain an array of aligned memory.
+template<class T, int ALIGNMENT>
+class AlignedArray {
+  uint8 *unaligned_mem_;
+  T     *data_;
+  size_t n_elements_;
+  void
+  allocate_aligned_data()
+  {
+    static_assert (ALIGNMENT % sizeof (T) == 0, "ALIGNMENT must be a multiple of sizeof (T)");
+    data_ = reinterpret_cast<T*> (aligned_alloc (n_elements_ * sizeof (T), ALIGNMENT, &unaligned_mem_));
+  }
+  // disallow copy constructor and assignment operator
+  RAPICORN_CLASS_NON_COPYABLE (AlignedArray);
+public:
+  AlignedArray (const vector<T>& elements) :
+    n_elements_ (elements.size())
+  {
+    allocate_aligned_data();
+    for (size_t i = 0; i < n_elements_; i++)
+      new (data_ + i) T (elements[i]);
+  }
+  AlignedArray (size_t n_elements) :
+    n_elements_ (n_elements)
+  {
+    allocate_aligned_data();
+    for (size_t i = 0; i < n_elements_; i++)
+      new (data_ + i) T();
+  }
+  ~AlignedArray()
+  {
+    // C++ destruction order: last constructed element is destroyed first
+    while (n_elements_)
+      data_[--n_elements_].~T();
+    aligned_free (&unaligned_mem_);
+  }
+  T&            operator[] (size_t pos)         { return data_[pos]; }
+  const T&      operator[] (size_t pos) const   { return data_[pos]; }
+  size_t        size       () const             { return n_elements_; }
+};
+
 // == Threading ==
 /**
  * The Spinlock uses low-latency busy spinning to acquire locks.

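Similarly, a hypothetical sketch of AlignedArray in use (the float element type
and the 16 byte SIMD alignment are illustrative assumptions):

    #include "bcore.hh"   // assumed header providing Bse::AlignedArray

    static void
    aligned_array_example ()
    {
      // 1024 value-initialized (zeroed) floats, aligned for 16-byte SIMD loads;
      // the static_assert requires ALIGNMENT % sizeof (float) == 0, which holds
      Bse::AlignedArray<float, 16> samples (1024);
      for (size_t i = 0; i < samples.size(); i++)
        samples[i] = 0.5f * i;
      // AlignedArray is non-copyable; on scope exit the destructor destroys the
      // elements in reverse order and releases the memory via aligned_free()
    }

The second constructor would allow seeding from a std::vector<float> instead,
copy-constructing each element into the aligned storage via placement new.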
