bpool.h
/*--------------------------------------------------------------------------------------+
|
|  Supplied under applicable software license agreement.
|
|  Copyright (c) 2018 Bentley Systems, Incorporated. All rights reserved.
|
+---------------------------------------------------------------------------------------*/
#pragma once

// Copyright (C) 2000, 2001 Stephen Cleary
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org for updates, documentation, and revision history.

#ifndef BBOOST_POOL_HPP
#define BBOOST_POOL_HPP
#include <Bentley/BeAssert.h>
#include <type_traits>

//#include <boost/config.hpp> // for workarounds
#ifdef _MSC_VER
    #define BBOOST_MSVC
    #pragma push_macro("max")
    #pragma push_macro("min")
    #undef max
    #undef min
#endif
#define BBOOST_NO_INCLASS_MEMBER_INITIALIZATION
#define BBOOST_ASSERT(X) BeAssert((X))

#ifdef BBOOST_NO_INCLASS_MEMBER_INITIALIZATION
    #define BBOOST_STATIC_CONSTANT(type, assignment) enum { assignment }
#else
    #define BBOOST_STATIC_CONSTANT(type, assignment) static const type assignment
#endif

// std::less, std::less_equal, std::greater
#include <functional>
// new[], delete[], std::nothrow
#include <new>
// std::size_t, std::ptrdiff_t
#include <cstddef>
// std::malloc, std::free
#include <cstdlib>
// std::invalid_argument
#include <stdexcept>
// std::max
#include <algorithm>

//#include <boost/bpool/poolfwd.hpp>

// boost::math::static_lcm
//#include "common_factor_ct.h"
//#define BBOOST_PTR_SIZE math::static_lcm<sizeof(size_type), sizeof(void *)>::value
#define BBOOST_PTR_SIZE sizeof(void *)
//#define BBOOST_MIN_ALIGNMENT math::static_lcm<std::alignment_of<void *>::value, std::alignment_of<size_type>::value>::value
#define BBOOST_MIN_ALIGNMENT std::alignment_of<void *>::value

// boost::simple_segregated_storage
// boost::alignment_of
//#include <boost/type_traits/alignment_of.hpp>
// BBOOST_ASSERT
//#include <boost/assert.hpp>

#ifdef BBOOST_POOL_INSTRUMENT
#include <iostream>
#include <iomanip>
#endif

// There are a few places in this file where the expression "this->m" is used.
// This expression is used to force instantiation-time name lookup, which I am
// informed is required for strict Standard compliance. It's only necessary
// if "m" is a member of a base class that is dependent on a template
// parameter.
// Thanks to Jens Maurer for pointing this out!
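//
// A minimal illustration of that rule (not part of this header): with a
// dependent base class, an unqualified "m" is not looked up in the base
// during two-phase name lookup, so "this->" is required.
//
//     template <typename T> struct base    { int m; };
//     template <typename T> struct derived : base<T>
//     {
//         int get() { return this->m; }   // a plain "m" would not compile
//     };
//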

#ifdef BBOOST_MSVC
#pragma warning(push)
#pragma warning(disable:4127) // Conditional expression is constant
#endif

namespace bboost
{

namespace details
{

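// PODptr describes a single memory block owned by the pool: 'ptr' points at
// the block and 'sz' is its total size in bytes. The last sizeof(size_type)
// bytes of the block hold the size of the next block, and the BBOOST_PTR_SIZE
// bytes before those hold a pointer to it, so the blocks form a singly-linked
// list with no separate per-block header. Everything before that trailing
// housekeeping data (element_size() bytes) is chunk storage.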
template <typename SizeType>
class PODptr
{
  public:
    typedef SizeType size_type;
    static_assert (sizeof(void*) == sizeof(size_type), "simplifying assumption");

  private:
    char * ptr;
    size_type sz;

    char * ptr_next_size() const
    {
        return (ptr + sz - sizeof(size_type));
    }
    char * ptr_next_ptr() const
    {
        return (ptr_next_size() - BBOOST_PTR_SIZE);
    }

  public:
    PODptr(char * const nptr, const size_type nsize)
    : ptr(nptr), sz(nsize)
    {
    }
    PODptr()
    : ptr(0), sz(0)
    {
    }

    bool valid() const
    {
        return (begin() != 0);
    }
    void invalidate()
    {
        begin() = 0;
    }
    char * & begin()
    {
        return ptr;
    }
    char * begin() const
    {
        return ptr;
    }
    char * end() const
    {
        return ptr_next_ptr();
    }
    size_type total_size() const
    {
        return sz;
    }
    size_type element_size() const
    {
        return static_cast<size_type>(sz - sizeof(size_type) - BBOOST_PTR_SIZE);
    }

    size_type & next_size() const
    {
        return *(static_cast<size_type *>(static_cast<void*>(ptr_next_size())));
    }
    char * & next_ptr() const
    {
        return *(static_cast<char **>(static_cast<void*>(ptr_next_ptr())));
    }

    PODptr next() const
    {
        return PODptr<size_type>(next_ptr(), next_size());
    }
    void next(const PODptr & arg) const
    {
        next_ptr() = arg.begin();
        next_size() = arg.total_size();
    }
}; // class PODptr

} // namespace details

template <typename UserAllocator>
class bpool : protected simple_segregated_storage<typename UserAllocator::size_type>
{
  public:
    typedef UserAllocator user_allocator;
    typedef typename UserAllocator::size_type size_type;
    typedef typename UserAllocator::difference_type difference_type;
    static_assert (sizeof(void*) == sizeof(size_type), "simplifying assumption");

  private:
    BBOOST_STATIC_CONSTANT(size_type, min_alloc_size = BBOOST_PTR_SIZE);
    BBOOST_STATIC_CONSTANT(size_type, min_align = BBOOST_MIN_ALIGNMENT);

    void * malloc_need_resize();
    void * ordered_malloc_need_resize();

  protected:
    details::PODptr<size_type> list;

    simple_segregated_storage<size_type> & store()
    {
        return *this;
    }
    const simple_segregated_storage<size_type> & store() const
    {
        return *this;
    }
    const size_type requested_size;
    size_type next_size;
    size_type start_size;
    size_type max_size;

    details::PODptr<size_type> find_POD(void * const chunk) const;

    // is_from() tests a chunk to determine if it belongs in a block.
    static bool is_from(void * const chunk, char * const i,
        const size_type sizeof_i)
    {
        // We use std::less_equal and std::less to test 'chunk'
        // against the array bounds because standard operators
        // may return unspecified results.
        // This is to ensure portability. The operators < <= > >= are only
        // defined for pointers to objects that are 1) in the same array, or
        // 2) subobjects of the same object [5.9/2].
        // The functor objects guarantee a total order for any pointer [20.3.3/8].
        std::less_equal<void *> lt_eq;
        std::less<void *> lt;
        return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
    }

    size_type alloc_size() const
    {
        // For alignment reasons, this used to be defined to be lcm(requested_size, sizeof(void *), sizeof(size_type)),
        // but is now more parsimonious: just rounding up to the minimum required alignment of our housekeeping data
        // when required. This works provided all alignments are powers of two.
        size_type s = std::max<size_type>(requested_size, min_alloc_size);
        size_type rem = s % min_align;
        if (rem)
            s += min_align - rem;
        BBOOST_ASSERT(s >= min_alloc_size);
        BBOOST_ASSERT(s % min_align == 0);
        return s;
    }
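    // Worked example of the rounding above (illustrative values, assuming
    // 8-byte pointers): for requested_size = 10, min_alloc_size = min_align = 8,
    // s = max(10, 8) = 10, rem = 10 % 8 = 2, so s becomes 10 + (8 - 2) = 16:
    // the smallest multiple of the alignment that holds a requested chunk.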

    static void * & nextof(void * const ptr)
    {
        return *(static_cast<void **>(ptr));
    }

  public:
    // pre: npartition_size != 0 && nnext_size != 0
    explicit bpool(const size_type nrequested_size,
        const size_type nnext_size = 32,
        const size_type nmax_size = 0)
    :
        list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size), max_size(nmax_size)
    {
    }

    ~bpool()
    {
        purge_memory();
    }

    // Releases memory blocks that don't have chunks allocated
    // pre: lists are ordered
    // Returns true if memory was actually deallocated
    bool release_memory();

    // Releases *all* memory blocks, even if chunks are still allocated
    // Returns true if memory was actually deallocated
    bool purge_memory();

    size_type get_next_size() const
    {
        return next_size;
    }
    void set_next_size(const size_type nnext_size)
    {
        next_size = start_size = nnext_size;
    }
    size_type get_max_size() const
    {
        return max_size;
    }
    void set_max_size(const size_type nmax_size)
    {
        max_size = nmax_size;
    }
    size_type get_requested_size() const
    {
        return requested_size;
    }

    // Both malloc and ordered_malloc do a quick inlined check first for any
    // free chunks. Only if we need to get another memory block do we call
    // the non-inlined *_need_resize() functions.
    // Returns 0 if out-of-memory
    void * malloc()
    {
        // Look for a non-empty storage
        if (!store().empty())
            return (store().malloc)();
        return malloc_need_resize();
    }

    void * ordered_malloc()
    {
        // Look for a non-empty storage
        if (!store().empty())
            return (store().malloc)();
        return ordered_malloc_need_resize();
    }

    // Returns 0 if out-of-memory
    // Allocate a contiguous section of n chunks
    void * ordered_malloc(size_type n);

    // pre: 'chunk' must have been previously
    // returned by *this.malloc().
    void free(void * const chunk)
    {
        (store().free)(chunk);
    }

    // pre: 'chunk' must have been previously
    // returned by *this.malloc().
    void ordered_free(void * const chunk)
    {
        store().ordered_free(chunk);
    }

    // pre: 'chunks' must have been previously
    // returned by *this.malloc(n).
    void free(void * const chunks, const size_type n)
    {
        const size_type partition_size = alloc_size();
        const size_type total_req_size = n * requested_size;
        const size_type num_chunks = total_req_size / partition_size +
            ((total_req_size % partition_size) ? 1 : 0);

        store().free_n(chunks, num_chunks, partition_size);
    }
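    // Worked example of the chunk count above (illustrative values): for
    // n = 3, requested_size = 10 and partition_size = 16, total_req_size = 30,
    // so num_chunks = 30 / 16 + 1 = 2 -- the ceiling of total_req_size over
    // partition_size, matching what ordered_malloc(n) originally handed out.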

    // pre: 'chunks' must have been previously
    // returned by *this.malloc(n).
    void ordered_free(void * const chunks, const size_type n)
    {
        const size_type partition_size = alloc_size();
        const size_type total_req_size = n * requested_size;
        const size_type num_chunks = total_req_size / partition_size +
            ((total_req_size % partition_size) ? 1 : 0);

        store().ordered_free_n(chunks, num_chunks, partition_size);
    }

    // is_from() tests a chunk to determine if it was allocated from *this
    bool is_from(void * const chunk) const
    {
        return (find_POD(chunk).valid());
    }
}; // class bpool
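
// Illustrative usage sketch (not part of this header). "my_user_allocator"
// is a hypothetical UserAllocator: any type providing size_type,
// difference_type, and static malloc/free of this shape satisfies the
// concept this class template expects.
//
//     struct my_user_allocator
//     {
//         typedef std::size_t    size_type;
//         typedef std::ptrdiff_t difference_type;
//         static char * malloc(const size_type bytes) { return new (std::nothrow) char[bytes]; }
//         static void   free(char * const block)      { delete [] block; }
//     };
//
//     bboost::bpool<my_user_allocator> p(sizeof(int)); // pool of int-sized chunks
//     void * chunk = p.malloc();                       // 0 on out-of-memory
//     if (chunk != 0)
//         p.free(chunk);                               // or let ~bpool() purge everything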

#ifndef BBOOST_NO_INCLASS_MEMBER_INITIALIZATION
template <typename UserAllocator>
typename bpool<UserAllocator>::size_type const bpool<UserAllocator>::min_alloc_size;
template <typename UserAllocator>
typename bpool<UserAllocator>::size_type const bpool<UserAllocator>::min_align;
#endif

template <typename UserAllocator>
bool bpool<UserAllocator>::release_memory()
{
    // ret is the return value: it will be set to true when we actually call
    // UserAllocator::free(..)
    bool ret = false;

    // This is a current & previous iterator pair over the memory block list
    details::PODptr<size_type> ptr = list;
    details::PODptr<size_type> prev;

    // This is a current & previous iterator pair over the free memory chunk list.
    // Note that "prev_free_p" in this case does NOT point to the previous memory
    // chunk in the free list, but rather the last free memory chunk before the
    // current block.
    void * free_p = this->first;
    void * prev_free_p = 0;

    const size_type partition_size = alloc_size();

    // Search through all the allocated memory blocks
    while (ptr.valid())
    {
        // At this point:
        //  ptr points to a valid memory block
        //  free_p points to either:
        //    0 if there are no more free chunks
        //    the first free chunk in this or some next memory block
        //  prev_free_p points to either:
        //    the last free chunk in some previous memory block
        //    0 if there is no such free chunk
        //  prev is either:
        //    the PODptr whose next() is ptr
        //    !valid() if there is no such PODptr

        // If there are no more free memory chunks, then every remaining
        // block is allocated out to its fullest capacity, and we can't
        // release any more memory
        if (free_p == 0)
            break;

        // We have to check all the chunks. If they are *all* free (i.e., present
        // in the free list), then we can free the block.
        bool all_chunks_free = true;

        // Iterate 'i' through all chunks in the memory block;
        // if the free list starts in this memory block, be careful to keep it there
        void * saved_free = free_p;
        for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
        {
            // If this chunk is not free
            if (i != free_p)
            {
                // We won't be able to free this block
                all_chunks_free = false;

                // free_p might have travelled outside ptr
                free_p = saved_free;
                // Abort searching the chunks; we won't be able to free this
                // block because a chunk is not free.
                break;
            }

            // We do not increment prev_free_p because we are in the same block
            free_p = nextof(free_p);
        }

        // post: if the memory block has any free chunks, free_p points to one of them;
        // otherwise, our assertions above are still valid

        const details::PODptr<size_type> next = ptr.next();

        if (!all_chunks_free)
        {
            if (is_from(free_p, ptr.begin(), ptr.element_size()))
            {
                std::less<void *> lt;
                void * const end = ptr.end();
                do
                {
                    prev_free_p = free_p;
                    free_p = nextof(free_p);
                } while (free_p && lt(free_p, end));
            }
            // This invariant is now restored:
            //  free_p points to the first free chunk in some next memory block, or
            //    0 if there is no such chunk.
            //  prev_free_p points to the last free chunk in this memory block.

            // We are just about to advance ptr. Maintain the invariant:
            //  prev is the PODptr whose next() is ptr, or !valid()
            //  if there is no such PODptr
            prev = ptr;
        }
        else
        {
            // All chunks from this block are free

            // Remove block from list
            if (prev.valid())
                prev.next(next);
            else
                list = next;

            // Remove all entries in the free list from this block
            if (prev_free_p != 0)
                nextof(prev_free_p) = free_p;
            else
                this->first = free_p;

            // And release memory
            (UserAllocator::free)(ptr.begin());
            ret = true;
        }

        // Increment ptr
        ptr = next;
    }

    next_size = start_size;
    return ret;
}

template <typename UserAllocator>
bool bpool<UserAllocator>::purge_memory()
{
    details::PODptr<size_type> iter = list;

    if (!iter.valid())
        return false;

    do
    {
        // hold "next" pointer
        const details::PODptr<size_type> next = iter.next();

        // delete the storage
        (UserAllocator::free)(iter.begin());

        // increment iter
        iter = next;
    } while (iter.valid());

    list.invalidate();
    this->first = 0;
    next_size = start_size;

    return true;
}

template <typename UserAllocator>
void * bpool<UserAllocator>::malloc_need_resize()
{
    size_type partition_size = alloc_size();
    size_type POD_size = static_cast<size_type>(next_size * partition_size + BBOOST_PTR_SIZE + sizeof(size_type));
    char * ptr = (UserAllocator::malloc)(POD_size);
    if (ptr == 0)
    {
        if (next_size > 4)
        {
            next_size >>= 1;
            partition_size = alloc_size();
            POD_size = static_cast<size_type>(next_size * partition_size + BBOOST_PTR_SIZE + sizeof(size_type));
            ptr = (UserAllocator::malloc)(POD_size);
        }
        if (ptr == 0)
            return 0;
    }
    const details::PODptr<size_type> node(ptr, POD_size);

    if (!max_size)
        next_size <<= 1;
    else if (next_size * partition_size / requested_size < max_size)
        next_size = std::min(next_size << 1, max_size * requested_size / partition_size);
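
    // Growth policy illustrated (values for exposition only): with the default
    // nnext_size of 32 and max_size == 0, successive refills request blocks of
    // 32, 64, 128, ... chunks; a nonzero max_size caps each request at roughly
    // max_size * requested_size / partition_size chunks per block.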
681 
682  // initialize it,
683  store().add_block(node.begin(), node.element_size(), partition_size);
684 
685  // insert it into the list,
686  node.next(list);
687  list = node;
688 
689  // and return a chunk from it.
690  return (store().malloc)();
691 }

template <typename UserAllocator>
void * bpool<UserAllocator>::ordered_malloc_need_resize()
{
    size_type partition_size = alloc_size();
    size_type POD_size = static_cast<size_type>(next_size * partition_size + BBOOST_PTR_SIZE + sizeof(size_type));
    char * ptr = (UserAllocator::malloc)(POD_size);
    if (ptr == 0)
    {
        if (next_size > 4)
        {
            next_size >>= 1;
            partition_size = alloc_size();
            POD_size = static_cast<size_type>(next_size * partition_size + BBOOST_PTR_SIZE + sizeof(size_type));
            ptr = (UserAllocator::malloc)(POD_size);
        }
        if (ptr == 0)
            return 0;
    }
    const details::PODptr<size_type> node(ptr, POD_size);

    if (!max_size)
        next_size <<= 1;
    else if (next_size * partition_size / requested_size < max_size)
        next_size = std::min(next_size << 1, max_size * requested_size / partition_size);

    // initialize it,
    // (the free list is empty here, so adding this block's chunks in
    // order costs no more than the unordered "add_block" would)
    store().add_ordered_block(node.begin(), node.element_size(), partition_size);

    // insert it into the list,
    // handle border case
    if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
    {
        node.next(list);
        list = node;
    }
    else
    {
        details::PODptr<size_type> prev = list;

        while (true)
        {
            // if we're about to hit the end or
            // if we've found where "node" goes
            if (prev.next_ptr() == 0
                || std::greater<void *>()(prev.next_ptr(), node.begin()))
                break;

            prev = prev.next();
        }

        node.next(prev.next());
        prev.next(node);
    }
    // and return a chunk from it.
    return (store().malloc)();
}

template <typename UserAllocator>
void * bpool<UserAllocator>::ordered_malloc(const size_type n)
{
    const size_type partition_size = alloc_size();
    const size_type total_req_size = n * requested_size;
    const size_type num_chunks = total_req_size / partition_size +
        ((total_req_size % partition_size) ? 1 : 0);

    void * ret = store().malloc_n(num_chunks, partition_size);

#ifdef BBOOST_POOL_INSTRUMENT
    std::cout << "Allocating " << n << " chunks from bpool of size " << partition_size << std::endl;
#endif
    if ((ret != 0) || (n == 0))
        return ret;

#ifdef BBOOST_POOL_INSTRUMENT
    std::cout << "Cache miss, allocating another chunk...\n";
#endif

    // Not enough memory in our storages; make a new storage,
    next_size = std::max(next_size, num_chunks);
    size_type POD_size = static_cast<size_type>(next_size * partition_size + BBOOST_PTR_SIZE + sizeof(size_type));
    char * ptr = (UserAllocator::malloc)(POD_size);

    if (ptr == 0)
    {
        if (num_chunks < next_size)
        {
            // Try again with just enough memory to do the job, or at least whatever we
            // allocated last time:
            next_size >>= 1;
            next_size = std::max(next_size, num_chunks);
            POD_size = static_cast<size_type>(next_size * partition_size + BBOOST_PTR_SIZE + sizeof(size_type));
            ptr = (UserAllocator::malloc)(POD_size);
        }
        if (ptr == 0)
            return 0;
    }
    const details::PODptr<size_type> node(ptr, POD_size);

    // Split up the block so we can use what wasn't requested.
    if (next_size > num_chunks)
        store().add_ordered_block(node.begin() + num_chunks * partition_size,
            node.element_size() - num_chunks * partition_size, partition_size);

    if (!max_size)
        next_size <<= 1;
    else if (next_size * partition_size / requested_size < max_size)
        next_size = std::min(next_size << 1, max_size * requested_size / partition_size);

    // insert it into the list,
    // handle border case.
    if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
    {
        node.next(list);
        list = node;
    }
    else
    {
        details::PODptr<size_type> prev = list;

        while (true)
        {
            // if we're about to hit the end, or if we've found where "node" goes.
            if (prev.next_ptr() == 0
                || std::greater<void *>()(prev.next_ptr(), node.begin()))
                break;

            prev = prev.next();
        }

        node.next(prev.next());
        prev.next(node);
    }

    // and return it.
    return node.begin();
}

template <typename UserAllocator>
details::PODptr<typename bpool<UserAllocator>::size_type>
bpool<UserAllocator>::find_POD(void * const chunk) const
{
    // Iterate down list to find which storage this chunk is from.
    details::PODptr<size_type> iter = list;
    while (iter.valid())
    {
        if (is_from(chunk, iter.begin(), iter.element_size()))
            return iter;
        iter = iter.next();
    }

    return iter;
}


} // namespace bboost

#ifdef BBOOST_MSVC
#pragma warning(pop)
#pragma pop_macro("max")
#pragma pop_macro("min")
#endif

#endif // BBOOST_POOL_HPP