LCOV - code coverage report
Current view: top level - DCPS - MemoryPool.cpp (source / functions)
Test: coverage.info
Date: 2023-04-30 01:32:43

                Hit    Total    Coverage
Lines:          234    240      97.5 %
Functions:       30     32      93.8 %

          Line data    Source code
       1             : /*
       2             :  *
       3             :  *
       4             :  * Distributed under the OpenDDS License.
       5             :  * See: http://www.opendds.org/license.html
       6             :  */
       7             : 
       8             : #include "DCPS/DdsDcps_pch.h"  // Only the _pch include should start with DCPS/
       9             : #include "MemoryPool.h"
      10             : #include "PoolAllocator.h"
      11             : #include "ace/Log_Msg.h"
      12             : #include "ace/OS_NS_stdio.h"
      13             : #include <stdexcept>
      14             : #include <limits>
      15             : #include <map>
      16             : #include <cstring>
      17             : 
      18             : #if defined(WITH_VALGRIND)
      19             : #include "valgrind/memcheck.h"
      20             : #endif
      21             : 
      22             : #define TEST_CHECK(COND) \
      23             :   if (!( COND )) { \
      24             :     char msg[1024]; \
      25             :     ACE_OS::snprintf(msg, 1024, "%s: FAILED at %s:%d", #COND, __FILE__, __LINE__); \
      26             :     ACE_OS::printf("%s\n", msg); \
      27             :     throw std::runtime_error(msg); \
      28             :     return; \
      29             :   }
      30             : 
      31             : OPENDDS_BEGIN_VERSIONED_NAMESPACE_DECL
      32             : 
      33             : namespace OpenDDS {  namespace DCPS {
      34             : 
      35         820 : AllocHeader::AllocHeader()
      36         820 : : alloc_size_(0)
      37         820 : , prev_size_(0)
      38             : {
      39         820 : }
      40             : 
      41             : unsigned char*
      42       12938 : AllocHeader::ptr() const
      43             : {
      44       12938 :   const unsigned char* buff = reinterpret_cast<const unsigned char*>(this + 1);
      45       12938 :   return const_cast<unsigned char*>(buff);
      46             : }
      47             : 
      48             : AllocHeader*
      49       12154 : AllocHeader::next_adjacent() {
      50       12154 :   unsigned char* past_buffer_end = ptr() + size();
      51       12154 :   return reinterpret_cast<AllocHeader*>(past_buffer_end);
      52             : }
      53             : 
      54             : AllocHeader*
      55        2984 : AllocHeader::prev_adjacent() {
      56        2984 :   AllocHeader* result = NULL;
      57        2984 :   if (prev_size_) {
      58        2984 :     unsigned char* self = reinterpret_cast<unsigned char*>(this);
      59        2984 :     unsigned char* prev_buffer_start = self - prev_size_;
      60        2984 :     unsigned char* past_alloc = prev_buffer_start - sizeof(AllocHeader);
      61        2984 :     result = reinterpret_cast<AllocHeader*>(past_alloc);
      62             :   }
      63        2984 :   return result;
      64             : }
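
The pointer arithmetic above depends on the pool layout: each block is an AllocHeader
immediately followed by its payload, and blocks are packed back to back. A rough sketch
of two adjacent blocks (widths not to scale):

    ... [AllocHeader A][payload of A][AllocHeader B][payload of B] ...
              ^ this    ^ this->ptr()     ^ this->next_adjacent()

prev_adjacent() walks the other way: prev_size_ holds the payload size of the preceding
block, so stepping back prev_size_ bytes and then one more AllocHeader lands on that
block's header.
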
      65             : 
      66             : void
      67           0 : AllocHeader::allocate(size_t size) {
      68           0 :   set_allocated();
      69           0 :   set_size(size);
      70           0 : }
      71             : 
      72             : void
      73        2150 : AllocHeader::set_size(size_t size)
      74             : {
      75        2150 :   if (is_free()) {
      76        1346 :     size *= -1;
      77             :   }
      78        2150 :   alloc_size_ = (int)size;
      79        2150 : }
      80             : 
      81             : void
      82         566 : AllocHeader::join_next() {
      83             :   // Sizes are summed as unsigned values; set_size() re-negates the stored size since this block is free
      84         566 :   size_t next_size = this->next_adjacent()->size();
      85         566 :   size_t joined_size = this->size() + next_size + sizeof(AllocHeader);
      86         566 :   this->set_size(joined_size);
      87         566 : }
      88             : 
      89             : void
      90          52 : FreeHeader::init_free_block(unsigned int pool_size)
      91             : {
      92          52 :   alloc_size_ = static_cast<int>(pool_size - sizeof(AllocHeader));
      93          52 :   prev_size_ = 0;
      94          52 :   set_free();
      95          52 : }
      96             : 
      97             : void
      98         691 : FreeHeader::set_free()
      99             : {
     100             :   // If this is newly freed
     101         691 :   if (!is_free()) {
     102         691 :     alloc_size_ *= -1;
     103         691 :     set_smaller_free(NULL, NULL);
     104         691 :     set_larger_free(NULL, NULL);
     105             :   }
     106         691 : }
     107             : FreeHeader*
     108       20764 : FreeHeader::smaller_free(unsigned char* pool_base) const
     109             : {
     110       20764 :   FreeHeader* result = NULL;
     111       20764 :   if (offset_smaller_free_ != std::numeric_limits<size_t>::max()) {
     112        1953 :     result = reinterpret_cast<FreeHeader*>(pool_base + offset_smaller_free_);
     113             :   }
     114       20764 :   return result;
     115             : }
     116             : 
     117             : FreeHeader*
     118        2916 : FreeHeader::larger_free(unsigned char* pool_base) const
     119             : {
     120        2916 :   FreeHeader* result = NULL;
     121        2916 :   if (offset_larger_free_ != std::numeric_limits<size_t>::max()) {
     122         702 :     result = reinterpret_cast<FreeHeader*>(pool_base + offset_larger_free_);
     123             :   }
     124        2916 :   return result;
     125             : }
     126             : 
     127             : void
     128        3795 : FreeHeader::set_smaller_free(FreeHeader* next, unsigned char* pool_base)
     129             : {
     130        3795 :   if (next) {
     131         423 :     offset_smaller_free_ = reinterpret_cast<unsigned char*>(next) - pool_base;
     132             :   } else {
     133        3372 :     offset_smaller_free_ = std::numeric_limits<size_t>::max();
     134             :   }
     135        3795 : }
     136             : 
     137             : void
     138        2481 : FreeHeader::set_larger_free(FreeHeader* prev, unsigned char* pool_base)
     139             : {
     140        2481 :   if (prev) {
     141         423 :     offset_larger_free_ = reinterpret_cast<unsigned char*>(prev) - pool_base;
     142             :   } else {
     143        2058 :     offset_larger_free_ = std::numeric_limits<size_t>::max();
     144             :   }
     145        2481 : }
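
set_smaller_free() and set_larger_free() store the free-list links as byte offsets from
the pool base rather than raw pointers, with std::numeric_limits<size_t>::max() as the
null sentinel (mirrored by smaller_free() and larger_free() above). A minimal standalone
sketch of that encoding, using hypothetical helper names that are not part of the library:

    #include <cstddef>
    #include <limits>

    const std::size_t NO_LINK = std::numeric_limits<std::size_t>::max();

    // Encode a node pointer as a byte offset from the pool base (NO_LINK for NULL).
    std::size_t encode_link(const unsigned char* node, const unsigned char* pool_base)
    {
      return node ? static_cast<std::size_t>(node - pool_base) : NO_LINK;
    }

    // Decode a stored offset back into a pointer (NULL for NO_LINK).
    unsigned char* decode_link(std::size_t offset, unsigned char* pool_base)
    {
      return offset == NO_LINK ? 0 : pool_base + offset;
    }

Storing offsets instead of raw pointers keeps the links position-independent with
respect to pool_base.
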
     146             : 
     147         520 : FreeIndexNode::FreeIndexNode()
     148         520 : : size_(0)
     149         520 : , limit_(0)
     150         520 : , ptr_(0)
     151             : {
     152         520 : }
     153             : 
     154             : void
     155         520 : FreeIndexNode::set_sizes(size_t size, size_t limit)
     156             : {
     157         520 :   size_ = size;
     158         520 :   limit_ = limit;
     159         520 : }
     160             : 
     161          52 : FreeIndex::FreeIndex(FreeHeader*& largest_free)
     162          52 : : size_(0)
     163         572 : , largest_free_(largest_free)
     164             : {
     165          52 : }
     166             : 
     167             : void
     168        1471 : FreeIndex::add(FreeHeader* freed)
     169             : {
     170        1471 :   unsigned int index = node_index(freed->size());
     171        1471 :   FreeIndexNode* node = nodes_ + index;
     172             : 
      173             :   // If the node is empty, or if freed is smaller than or equal to the node's current alloc
     174        1471 :   if ((node->ptr() == NULL) || (node->ptr()->size() >= freed->size())) {
     175             :     // Use this alloc in the index
     176        1454 :     node->set_ptr(freed);
     177             :   }
     178        1471 : }
     179             : 
     180             : void
     181        1353 : FreeIndex::remove(FreeHeader* free_block, FreeHeader* larger)
     182             : {
     183        1353 :   unsigned int index = node_index(free_block->size());
     184        1353 :   FreeIndexNode* node = nodes_ + index;
     185             : 
     186             :   // If the node points to the free block
     187        1353 :   if (node->ptr() == free_block) {
     188             :     // If the larger can be used by this node
     189        1348 :     if (larger && node->contains(larger->size())) {
     190           9 :       node->set_ptr(larger);
     191             :     } else {
     192        1339 :       node->set_ptr(NULL);
     193             :     }
     194             :   }
     195        1353 : }
     196             : 
     197             : void
     198          52 : FreeIndex::init(FreeHeader* init_free_block)
     199             : {
     200          52 :   size_t max = std::numeric_limits<size_t>::max();
     201         572 :   for (size_t size = min_index; size <= max_index; size *= 2) {
     202         520 :     nodes_[size_].set_sizes(size, (size == max_index) ? max  :  size*2);
     203         520 :     ++size_;
     204             :   }
     205          52 :   add(init_free_block);
     206          52 : }
     207             : 
     208             : FreeHeader*
     209       13869 : FreeIndex::find(size_t search_size, unsigned char* pool_base)
     210             : {
     211       13869 :   unsigned int index = node_index(search_size);
     212       13869 :   FreeIndexNode* index_node = nodes_ + index;
     213             : 
     214             :   // Larger or equal to search_size
     215       13869 :   FreeHeader* result = NULL;
     216       13869 :   if (largest_free_ && (largest_free_->size() >= search_size)) {
     217        9289 :     result = largest_free_;
     218             : 
     219             :     // Look from here and larger
     220       33246 :     while (index_node < nodes_ + size_) {
     221       33246 :       if (index_node->ptr() && index_node->ptr()->size() >= search_size) {
     222        9289 :         result = index_node->ptr();
     223        9289 :         break;
     224             :       }
     225       23957 :       ++index_node;
     226             :     }
     227             :   }
     228             : 
     229             :   // Now traverse, searching for smaller than result
     230       13888 :   while (result) {
     231        9308 :     FreeHeader* smaller = result->smaller_free(pool_base);
     232        9308 :     if (smaller && smaller->size() >= search_size) {
     233          19 :       result = smaller;
     234             :     } else {
     235        9289 :       break;
     236             :     }
     237             :   }
     238             : 
     239       13869 :   return result;
     240             : }
     241             : 
     242             : unsigned int
     243       16712 : FreeIndex::node_index(size_t size)
     244             : {
      245             :   // Use shifting to compute log base 2 of size.
      246             :   //   Start by shifting right by min_index_pow + 1 bits (+1 because min_index,
      247             :   //   being a power of 2, already accounts for one bit).
     248       16712 :   size_t size_copy = size >> (min_index_pow + 1);
     249       16712 :   unsigned int index = 0;
     250       16712 :   unsigned int max_idx = max_index_pow - min_index_pow;
     251       95984 :   while (size_copy && (index < max_idx)) {
     252       79272 :     ++index;
     253       79272 :     size_copy = size_copy >> 1;
     254             :   }
     255       16712 :   return index;
     256             : }
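
node_index() computes, in effect, floor(log2(size)) relative to the smallest indexed
size, clamped to the last bucket. A compilable sketch of the same mapping under assumed
bounds (the real min_index_pow/max_index_pow are defined in MemoryPool.h and may differ):

    #include <cstddef>

    unsigned int bucket_for(std::size_t size)
    {
      const unsigned int MIN_POW = 3;   // assumed: smallest indexed block size is 2^3 = 8
      const unsigned int MAX_POW = 12;  // assumed: largest indexed block size is 2^12 = 4096
      std::size_t shifted = size >> (MIN_POW + 1);
      unsigned int index = 0;
      while (shifted && index < MAX_POW - MIN_POW) {
        ++index;
        shifted >>= 1;
      }
      return index;  // bucket_for(8) == 0, bucket_for(16) == 1, bucket_for(100) == 3
    }

With those assumed bounds, sizes 8 through 15 map to bucket 0, 16 through 31 to bucket 1,
and anything 4096 or larger is clamped to the last bucket.
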
     257             : 
     258             : #ifdef VALIDATE_MEMORY_POOL
     259             : void
     260             : FreeIndex::validate_index(FreeIndex& index, unsigned char* base, bool log)
     261             : {
     262             :   if (log) {
     263             :     FreeIndexNode* node = index.nodes_;
     264             :     while (node < index.nodes_ + index.size_) {
     265             :       if (node->ptr()) {
     266             :         ACE_OS::printf("  IND[%4d] -> %4d\n", node->size(), node->ptr()->size());
     267             :       } else {
     268             :         ACE_OS::printf("  IND[%4d] -> NULL\n", node->size());
     269             :       }
     270             :       ++node;
     271             :     }
     272             :   }
     273             : 
     274             :   // Validate searches of each size
     275             :   for (size_t size = min_index; size <= max_index; size *= 2) {
     276             :     // Find size or larger
     277             :     FreeHeader* size_or_larger = index.find(size, base);
     278             :     if (size_or_larger) {
     279             :       TEST_CHECK(size_or_larger->size() >= size);
     280             :     }
     281             :   }
     282             : 
     283             :   // Validate each node points to a free block of the proper size;
     284             :   for (FreeIndexNode* node = index.nodes_; node < index.nodes_ + index.size_; ++node) {
     285             :     FreeHeader* block = node->ptr();
     286             :     if (block) {
     287             :       // node should point to a free block of the proper size;
     288             :       TEST_CHECK(node->contains(block->size()));
     289             : 
     290             :       FreeHeader* smaller = block;
     291             :       while ((smaller = smaller->smaller_free(base))) {
     292             :         // Anything smaller should be too small for this node
     293             :         TEST_CHECK(smaller->size() < node->size());
     294             :       }
     295             :     }
     296             :   }
     297             : }
     298             : #endif
     299             : 
     300          40 : MemoryPool::MemoryPool(unsigned int pool_size, size_t granularity)
     301          40 : : granularity_(align(granularity, 8))
     302          40 : , min_alloc_size_(align(min_free_size - sizeof(AllocHeader), granularity_))
     303          40 : , pool_size_(align(pool_size, granularity_))
     304          40 : , pool_ptr_(new unsigned char[pool_size_])
     305          40 : , largest_free_(NULL)
     306          40 : , free_index_(largest_free_)
     307             : {
     308          40 :   AllocHeader* the_pool = new (pool_ptr_) AllocHeader();
     309          40 :   FreeHeader* first_free = reinterpret_cast<FreeHeader*>(the_pool);
     310          40 :   first_free->init_free_block(static_cast<unsigned int>(pool_size_));
     311          40 :   largest_free_ = first_free;
     312          40 :   free_index_.init(first_free);
     313          40 :   lwm_free_bytes_ = largest_free_->size();
     314             : #if defined(WITH_VALGRIND)
     315             :   VALGRIND_MAKE_MEM_NOACCESS(pool_ptr_, pool_size_);
     316             :   VALGRIND_CREATE_MEMPOOL(pool_ptr_, 0, false);
     317             : #endif
     318          40 : }
     319             : 
     320          40 : MemoryPool::~MemoryPool()
     321             : {
     322             : #ifndef OPENDDS_SAFETY_PROFILE
     323          40 :   delete [] pool_ptr_;
     324             : #endif
     325          40 : }
     326             : 
     327             : size_t
     328           0 : MemoryPool::lwm_free_bytes() const
     329             : {
     330           0 :   return lwm_free_bytes_;
     331             : }
     332             : 
     333             : void*
     334         787 : MemoryPool::pool_alloc(size_t size)
     335             : {
     336             : #if defined(WITH_VALGRIND)
     337             :   VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
     338             : #endif
     339             : 
     340             :   // Pointer to return
     341         787 :   unsigned char* block = NULL;
     342             : 
     343             :   // Round up to 8-byte boundary
     344         787 :   size_t aligned_size = align(size, granularity_);
     345             : 
     346         787 :   if (aligned_size < min_alloc_size_) {
     347           1 :     aligned_size = min_alloc_size_;
     348             :   }
     349             : 
     350             :   // The block to allocate from
     351         787 :   FreeHeader* block_to_alloc = free_index_.find(aligned_size, pool_ptr_);
     352             : 
     353         787 :   if (block_to_alloc) {
     354         784 :     block = allocate(block_to_alloc, aligned_size);
     355             :   }
     356             : 
     357             :   // Update lwm
     358         787 :   size_t largest_free_bytes = largest_free_ ? largest_free_->size() : 0;
     359         787 :   if (largest_free_bytes < lwm_free_bytes_) {
     360         253 :     lwm_free_bytes_ = largest_free_bytes;
     361             :   }
     362             : 
     363             : #ifdef VALIDATE_MEMORY_POOL
     364             :   validate_pool(*this, false);
     365             : #endif
     366             : 
     367             : #if defined(WITH_VALGRIND)
     368             :   VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
     369             :   VALGRIND_MEMPOOL_ALLOC(pool_ptr_, block, size);
     370             : #endif
     371             : 
     372         787 :   return block;
     373             : }
     374             : 
     375             : bool
     376         616 : MemoryPool::pool_free(void* ptr)
     377             : {
     378         616 :   bool freed = false;
     379         616 :   if (ptr && includes(ptr)) {
     380             : #if defined(WITH_VALGRIND)
     381             :     VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
     382             : #endif
     383             : 
     384         615 :     FreeHeader* header = reinterpret_cast<FreeHeader*>(
     385             :         reinterpret_cast<AllocHeader*>(ptr) - 1);
     386             : 
     387             :     // Free header
     388         615 :     header->set_free();
     389             : 
     390         615 :     join_free_allocs(header);
     391             : 
     392             : #ifdef VALIDATE_MEMORY_POOL
     393             :     validate_pool(*this, false);
     394             : #endif
     395             : 
     396         615 :     freed = true;
     397             : 
     398             : #if defined(WITH_VALGRIND)
      399             :     VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
     400             :     VALGRIND_MEMPOOL_FREE(pool_ptr_, ptr);
     401             : #endif
     402             :   }
     403             : 
     404         616 :   return freed;
     405             : }
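
Together, pool_alloc() and pool_free() form a self-contained allocator over one
contiguous buffer. A minimal usage sketch; the pool size, granularity, and request size
below are illustrative values, not taken from the source:

    #include "MemoryPool.h"

    void example_usage()
    {
      // 64 KiB pool, 8-byte granularity (both arbitrary for this example).
      OpenDDS::DCPS::MemoryPool pool(64 * 1024, 8);

      void* p = pool.pool_alloc(100);  // request is rounded up to the granularity
      if (p) {
        // ... use the buffer ...
        pool.pool_free(p);             // returns false for pointers the pool does not own
      }
    }
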
     406             : 
     407             : void
     408         615 : MemoryPool::join_free_allocs(FreeHeader* freed)
     409             : {
     410             :   // Check adjacent
     411         615 :   if (joinable_next(freed)) {
     412         276 :     FreeHeader* next_free = reinterpret_cast<FreeHeader*>(freed->next_adjacent());
     413         276 :     remove_free_alloc(next_free);
     414         276 :     freed->join_next();
     415             :     // Adjust psize of adjacent
     416         276 :     AllocHeader* next = freed->next_adjacent();
     417         276 :     if (includes(next)) {
     418          15 :       next->set_prev_size(freed->size());
     419             :     }
     420             :   }
     421         615 :   if (joinable_prev(freed)) {
     422         290 :     FreeHeader* prev_free = reinterpret_cast<FreeHeader*>(freed->prev_adjacent());
     423         290 :     remove_free_alloc(prev_free);
     424             :     // Join prev with freed
     425         290 :     prev_free->join_next();
     426         290 :     insert_free_alloc(prev_free);
     427             :     // Adjust psize of adjacent
     428         290 :     AllocHeader* next = prev_free->next_adjacent();
     429         290 :     if (includes(next)) {
     430          26 :       next->set_prev_size(prev_free->size());
     431             :     }
     432             :   } else {
     433         325 :     insert_free_alloc(freed);
     434             :   }
     435         615 : }
     436             : 
     437             : void
     438        1350 : MemoryPool::remove_free_alloc(FreeHeader* block_to_alloc)
     439             : {
     440        1350 :   FreeHeader* smaller = block_to_alloc->smaller_free(pool_ptr_);
     441        1350 :   FreeHeader* larger = block_to_alloc->larger_free(pool_ptr_);
     442             : 
     443        1350 :   block_to_alloc->set_smaller_free(NULL, NULL);
     444        1350 :   block_to_alloc->set_larger_free(NULL, NULL);
     445             : 
     446             :   // If this was the largest free alloc
     447        1350 :   if (block_to_alloc == largest_free_) {
     448             :     // It no longer is
     449        1048 :     largest_free_ = smaller;
     450             :   }
     451             : 
     452        1350 :   if (larger) {
     453         302 :     larger->set_smaller_free(smaller, pool_ptr_);
     454             :   }
     455             : 
     456        1350 :   if (smaller) {
     457          24 :     smaller->set_larger_free(larger, pool_ptr_);
     458             :   }
     459             : 
     460             :   // Remove from free index
     461        1350 :   free_index_.remove(block_to_alloc, larger);
     462        1350 : }
     463             : 
     464             : void
     465        1395 : MemoryPool::insert_free_alloc(FreeHeader* freed)
     466             : {
     467             :   // Find free alloc freed size or larger
     468        1395 :   FreeHeader* alloc = free_index_.find(freed->size(), pool_ptr_);
     469             :   // If found
     470        1395 :   if (alloc) {
     471         341 :     FreeHeader* smaller = alloc->smaller_free(pool_ptr_);
     472             : 
     473             :     // Insert into list
     474         341 :     freed->set_larger_free(alloc, pool_ptr_);
     475         341 :     alloc->set_smaller_free(freed, pool_ptr_);
     476         341 :     if (smaller) {
     477          21 :       smaller->set_larger_free(freed, pool_ptr_);
     478          21 :       freed->set_smaller_free(smaller, pool_ptr_);
     479             :     }
      480             :   // Else freed is the largest free alloc
     481             :   } else {
     482        1054 :     if (freed != largest_free_) {
     483        1054 :       freed->set_smaller_free(largest_free_, pool_ptr_);
     484        1054 :       if (largest_free_) {
     485          21 :         largest_free_->set_larger_free(freed, pool_ptr_);
     486             :       }
     487        1054 :       largest_free_ = freed;
     488             :     }
     489             :   }
     490             : 
     491             :   // Insert and replace alloc if necessary
     492        1395 :   free_index_.add(freed);
     493        1395 : }
     494             : 
     495             : unsigned char*
     496         784 : MemoryPool::allocate(FreeHeader* free_block, size_t alloc_size)
     497             : {
     498         784 :   size_t free_block_size = free_block->size();
     499         784 :   size_t remainder = free_block_size - alloc_size;
     500             : 
     501             :   // May not be enough room for another allocation
     502         784 :   if (remainder < min_free_size) {
     503           4 :     alloc_size = free_block_size; // use it all
     504           4 :     remainder = 0;
     505             :   }
     506             : 
     507             :   // If we are NOT allocating the whole block
     508         784 :   if (remainder) {
      509             :     // Account for the header here - won't overflow due to the check above
     510         780 :     remainder -= sizeof(AllocHeader);
     511             : 
     512             :     // Adjust current adjacent block (after free block)
     513         780 :     AllocHeader* next_adjacent = free_block->next_adjacent();
     514         780 :     if (includes(next_adjacent)) {
     515         484 :       next_adjacent->set_prev_size(static_cast<int>(alloc_size));
     516             :     }
     517             : 
     518             :     // Always remove, resize, and reinsert to make sure free list and free
     519             :     // index are in sync
     520         780 :     remove_free_alloc(free_block);
     521         780 :     free_block->set_size(remainder);
     522         780 :     insert_free_alloc(free_block);
     523             : 
     524             :     // After resize, can use next_adjacent() to safely get to the end of the
     525             :     // resized block.
     526             :     // Taking free memory and allocating, so invoke constructor
     527         780 :     AllocHeader* alloc_block = new(free_block->next_adjacent()) AllocHeader();
     528             : 
     529             :     // Allocate adjacent block (at end of existing block)
     530         780 :     alloc_block->set_size(alloc_size);
     531         780 :     alloc_block->set_allocated();
     532         780 :     alloc_block->set_prev_size(static_cast<int>(remainder));
     533         780 :     return alloc_block->ptr();
     534             :   // Else we ARE allocating the whole block
     535             :   } else {
     536           4 :     free_block->set_allocated();
     537             :     // remove free_block from free list
     538           4 :     remove_free_alloc(free_block);
     539           4 :     return free_block->ptr();
     540             :   }
     541             : }
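
As a concrete illustration of the split path above (assuming an 8-byte AllocHeader
purely for the arithmetic; the real sizeof(AllocHeader) may differ): allocating 64 bytes
from a 1000-byte free block gives remainder = 1000 - 64 = 936, reduced to 928 after
reserving one new header. The free block is shrunk in place to 928 bytes, a fresh
AllocHeader plus the 64-byte payload are carved from the high end of the original
region, and the returned pointer is the address just past that new header.
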
     542             : 
     543             : bool
     544         615 : MemoryPool::joinable_next(FreeHeader* freed)
     545             : {
     546         615 :   AllocHeader* next_alloc = freed->next_adjacent();
     547        1230 :   return freed->is_free() &&
     548        1230 :          includes(next_alloc) &&
     549         961 :          next_alloc->is_free();
     550             : }
     551             : 
     552             : bool
     553         615 : MemoryPool::joinable_prev(FreeHeader* freed)
     554             : {
     555         615 :   AllocHeader* prev_alloc = freed->prev_adjacent();
     556        1230 :   return freed->is_free() &&
     557        1230 :          includes(prev_alloc) &&
     558        1230 :          prev_alloc->is_free();
     559             : }
     560             : 
     561             : #ifdef VALIDATE_MEMORY_POOL
     562             : void
     563             : MemoryPool::validate_pool(MemoryPool& pool, bool log) {
     564             :   AllocHeader* prev = 0;
     565             :   size_t allocated_bytes = 0;
     566             :   size_t free_bytes = 0;
     567             :   size_t oh_bytes = 0;
     568             :   size_t free_count = 0;
     569             :   unsigned char* pool_end = pool.pool_ptr_ + pool.pool_size_;
     570             :   bool prev_was_free;
     571             :   size_t index = 0;
     572             : 
     573             :   typedef std::map<FreeHeader*, int> FreeMap;
     574             :   FreeMap free_map;
     575             :   // Gather all free indices
     576             :   AllocHeader* alloc = reinterpret_cast<AllocHeader*>(pool.pool_ptr_);
     577             :   while (pool.includes(alloc)) {
     578             :     FreeHeader* free_header = alloc->is_free() ?
     579             :           reinterpret_cast<FreeHeader*>(alloc) : NULL;
     580             :     if (free_header) {
     581             :       free_map[free_header] = index;
     582             :     }
     583             :     alloc = alloc->next_adjacent();
     584             :     ++index;
     585             :   }
     586             : 
     587             :   index = 0;
     588             :   if (log) {
     589             :     ACE_OS::printf("Pool ptr %zx end %zx\n", (unsigned long)pool.pool_ptr_,
     590             :            (unsigned long)pool_end);
     591             :    }
     592             : 
     593             :   // Check all allocs in positional order and not overlapping
     594             :   alloc = reinterpret_cast<AllocHeader*>(pool.pool_ptr_);
     595             :   while (pool.includes(alloc)) {
     596             :     if (log) {
     597             : 
     598             :       int smlr_index = -1;
     599             :       int lrgr_index = -1;
     600             :       char lrgr_buff[32];
     601             :       char smlr_buff[32];
     602             : 
     603             :       FreeHeader* free_header = alloc->is_free() ?
     604             :             reinterpret_cast<FreeHeader*>(alloc) : NULL;
     605             :       if (free_header) {
     606             :         FreeMap::const_iterator found;
     607             :         found = free_map.find(free_header->smaller_free(pool.pool_ptr_));
     608             :         if (found != free_map.end()) {
     609             :           smlr_index = found->second;
     610             :           snprintf(smlr_buff, 32, "[%2d]", smlr_index); // preprocessed out
     611             :         }
     612             :         found = free_map.find(free_header->larger_free(pool.pool_ptr_));
     613             :         if (found != free_map.end()) {
     614             :           lrgr_index = found->second;
     615             :           snprintf(lrgr_buff, 32, "[%2d]", lrgr_index); // preprocessed out
     616             :         }
     617             :       }
     618             :       ACE_OS::printf(
     619             :         "Alloc[%zu] %s at %zx ptr %zx lg %s sm %s size %d psize %d\n",
     620             :         index++,
     621             :         alloc->is_free() ?
     622             :         (alloc == pool.largest_free_ ? "FREE!" : "free ")  : "     ",
     623             :         (unsigned long)alloc,
     624             :         (unsigned long)alloc->ptr(),
     625             :         lrgr_index >= 0 ? lrgr_buff : "[  ]",
     626             :         smlr_index >= 0 ? smlr_buff : "[  ]",
     627             :         alloc->size(),
     628             :         alloc->prev_size()
     629             :       );
     630             :     }
     631             : 
     632             :     TEST_CHECK(alloc->size());
     633             :     if (prev) {
     634             :       TEST_CHECK(prev->next_adjacent() == alloc);
     635             :       TEST_CHECK(alloc->prev_adjacent() == prev);
     636             :       // Validate  these are not consecutive free blocks
     637             :       TEST_CHECK(!(prev_was_free && alloc->is_free()));
     638             :     }
     639             : 
     640             :     if (!alloc->is_free()) {
     641             :       allocated_bytes += alloc->size();
     642             :       prev_was_free = false;
     643             :     } else {
     644             :       free_bytes += alloc->size();
     645             :       prev_was_free = true;
     646             :     }
     647             :     oh_bytes += sizeof(AllocHeader);
     648             :     prev = alloc;
     649             :     alloc = alloc->next_adjacent();
     650             :   }
     651             :   TEST_CHECK((unsigned char*)alloc == pool_end);
     652             : 
     653             :   TEST_CHECK(allocated_bytes + free_bytes + oh_bytes == pool.pool_size_);
     654             : 
     655             :   FreeIndex::validate_index(pool.free_index_, pool.pool_ptr_, log);
     656             : 
     657             :   size_t prev_size = 0;
     658             :   size_t free_bytes_in_list = 0;
     659             :   FreeHeader* free_alloc = NULL;
     660             :   FreeHeader* prev_free = NULL;
     661             : 
     662             :   // Check all free blocks in size order
     663             :   for (free_alloc = pool.largest_free_;
     664             :        free_alloc;
     665             :        free_alloc = free_alloc->smaller_free(pool.pool_ptr_)) {
     666             :     // Should be marked free
     667             :     TEST_CHECK(free_alloc->is_free());
     668             :     // Check for infinite loop
     669             :     TEST_CHECK(++free_count < 10000);
     670             : 
     671             :     // Sum bytes found
     672             :     free_bytes_in_list += free_alloc->size();
     673             : 
     674             :     // If not the first alloc
     675             :     if (prev_size) {
     676             :       TEST_CHECK(free_alloc->size() <= prev_size);
     677             :       TEST_CHECK(free_alloc->size() > 0);
     678             :     }
     679             :     prev_size = free_alloc->size();
     680             :     prev_free = free_alloc;
     681             :   }
     682             : 
     683             :   TEST_CHECK(free_bytes == free_bytes_in_list);
     684             : 
     685             :   // Try again from smallest to largest
     686             :   if (prev_free) {
     687             :     free_bytes_in_list = 0;
     688             : 
     689             :     for (free_alloc = prev_free;
     690             :          free_alloc;
     691             :          free_alloc = free_alloc->larger_free(pool.pool_ptr_)) {
     692             :       // Should be marked free
     693             :       TEST_CHECK(free_alloc->is_free());
     694             : 
     695             :       // Sum bytes found
     696             :       free_bytes_in_list += free_alloc->size();
     697             : 
     698             :       // If not the first alloc
     699             :       if (free_alloc != prev_free) {
     700             :         TEST_CHECK(free_alloc->size() >= prev_size);
     701             :         TEST_CHECK(free_alloc->size() > 0);
     702             :       }
     703             :       prev_size = free_alloc->size();
     704             :     }
     705             :     TEST_CHECK(free_bytes == free_bytes_in_list);
     706             :   }
     707             : }
     708             : #endif
     709             : 
     710             : }}
     711             : 
     712             : OPENDDS_END_VERSIONED_NAMESPACE_DECL

Generated by: LCOV version 1.16