MemoryPool.cpp

/*
 *
 *
 * Distributed under the OpenDDS License.
 * See: http://www.opendds.org/license.html
 */

#include "DCPS/DdsDcps_pch.h"  ////Only the _pch include should start with DCPS/
#include "MemoryPool.h"
#include "PoolAllocator.h"
#include "ace/Log_Msg.h"
#include "ace/OS_NS_stdio.h"
#include <stdexcept>
#include <limits>
#include <map>
#include <cstring>

#if defined(WITH_VALGRIND)
#include "valgrind/memcheck.h"
#endif

#define TEST_CHECK(COND) \
  if (!( COND )) { \
    char msg[1024]; \
    ACE_OS::snprintf(msg, 1024, "%s: FAILED at %s:%d", #COND, __FILE__, __LINE__); \
    ACE_OS::printf("%s\n", msg); \
    throw std::runtime_error(msg); \
    return; \
  }

namespace OpenDDS {  namespace DCPS {

AllocHeader::AllocHeader()
: alloc_size_(0)
, prev_size_(0)
{
}

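// Layout note: each block in the pool is [AllocHeader][user buffer].  ptr()
// returns the first byte after the header, and next_adjacent()/prev_adjacent()
// use the stored sizes to step between physically neighboring blocks.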
unsigned char*
AllocHeader::ptr() const
{
  const unsigned char* buff = reinterpret_cast<const unsigned char*>(this + 1);
  return const_cast<unsigned char*>(buff);
}

AllocHeader*
AllocHeader::next_adjacent() {
  unsigned char* past_buffer_end = ptr() + size();
  return reinterpret_cast<AllocHeader*>(past_buffer_end);
}

AllocHeader*
AllocHeader::prev_adjacent() {
  AllocHeader* result = NULL;
  if (prev_size_) {
    unsigned char* self = reinterpret_cast<unsigned char*>(this);
    unsigned char* prev_buffer_start = self - prev_size_;
    unsigned char* past_alloc = prev_buffer_start - sizeof(AllocHeader);
    result = reinterpret_cast<AllocHeader*>(past_alloc);
  }
  return result;
}

void
AllocHeader::allocate(size_t size) {
  set_allocated();
  set_size(size);
}

void
AllocHeader::set_size(size_t size)
{
  if (is_free()) {
    size *= -1;
  }
  alloc_size_ = (int)size;
}

void
AllocHeader::join_next() {
  // The math here is unsigned; set_size() will store the result as negative
  // because this block is free (it is)
  size_t next_size = this->next_adjacent()->size();
  size_t joined_size = this->size() + next_size + sizeof(AllocHeader);
  this->set_size(joined_size);
}

void
FreeHeader::init_free_block(unsigned int pool_size)
{
  alloc_size_ = static_cast<int>(pool_size - sizeof(AllocHeader));
  prev_size_ = 0;
  set_free();
}

void
FreeHeader::set_free()
{
  // If this is newly freed
  if (!is_free()) {
    alloc_size_ *= -1;
    set_smaller_free(NULL, NULL);
    set_larger_free(NULL, NULL);
  }
}

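// Free-list links are stored as byte offsets from the pool base rather than as
// raw pointers; std::numeric_limits<size_t>::max() marks "no neighbor".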
FreeHeader*
FreeHeader::smaller_free(unsigned char* pool_base) const
{
  FreeHeader* result = NULL;
  if (offset_smaller_free_ != std::numeric_limits<size_t>::max()) {
    result = reinterpret_cast<FreeHeader*>(pool_base + offset_smaller_free_);
  }
  return result;
}

FreeHeader*
FreeHeader::larger_free(unsigned char* pool_base) const
{
  FreeHeader* result = NULL;
  if (offset_larger_free_ != std::numeric_limits<size_t>::max()) {
    result = reinterpret_cast<FreeHeader*>(pool_base + offset_larger_free_);
  }
  return result;
}

void
FreeHeader::set_smaller_free(FreeHeader* next, unsigned char* pool_base)
{
  if (next) {
    offset_smaller_free_ = reinterpret_cast<unsigned char*>(next) - pool_base;
  } else {
    offset_smaller_free_ = std::numeric_limits<size_t>::max();
  }
}

void
FreeHeader::set_larger_free(FreeHeader* prev, unsigned char* pool_base)
{
  if (prev) {
    offset_larger_free_ = reinterpret_cast<unsigned char*>(prev) - pool_base;
  } else {
    offset_larger_free_ = std::numeric_limits<size_t>::max();
  }
}

FreeIndexNode::FreeIndexNode()
: size_(0)
, limit_(0)
, ptr_(0)
{
}

void
FreeIndexNode::set_sizes(size_t size, size_t limit)
{
  size_ = size;
  limit_ = limit;
}

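// FreeIndex keeps one FreeIndexNode per power-of-two size range.  Each node
// points at the smallest free block in its range, so find() can jump close to
// a best-fit candidate without scanning the entire free list.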
FreeIndex::FreeIndex(FreeHeader*& largest_free)
: size_(0)
, largest_free_(largest_free)
{
}

void
FreeIndex::add(FreeHeader* freed)
{
  unsigned int index = node_index(freed->size());
  FreeIndexNode* node = nodes_ + index;

  // If the node is empty, or if freed is smaller or equal to the node's alloc
  if ((node->ptr() == NULL) || (node->ptr()->size() >= freed->size())) {
    // Use this alloc in the index
    node->set_ptr(freed);
  }
}

void
FreeIndex::remove(FreeHeader* free_block, FreeHeader* larger)
{
  unsigned int index = node_index(free_block->size());
  FreeIndexNode* node = nodes_ + index;

  // If the node points to the free block
  if (node->ptr() == free_block) {
    // If the larger can be used by this node
    if (larger && node->contains(larger->size())) {
      node->set_ptr(larger);
    } else {
      node->set_ptr(NULL);
    }
  }
}

void
FreeIndex::init(FreeHeader* init_free_block)
{
  size_t max = std::numeric_limits<size_t>::max();
  for (size_t size = min_index; size <= max_index; size *= 2) {
    nodes_[size_].set_sizes(size, (size == max_index) ? max : size * 2);
    ++size_;
  }
  add(init_free_block);
}

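// find() returns a free block of at least search_size, preferring the smallest
// such block: start at the bucket for search_size, fall back to largest_free_,
// then walk the size-ordered free list toward smaller blocks.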
FreeHeader*
FreeIndex::find(size_t search_size, unsigned char* pool_base)
{
  unsigned int index = node_index(search_size);
  FreeIndexNode* index_node = nodes_ + index;

  // Look for a block larger than or equal to search_size
  FreeHeader* result = NULL;
  if (largest_free_ && (largest_free_->size() >= search_size)) {
    result = largest_free_;

    // Look from here and larger
    while (index_node < nodes_ + size_) {
      if (index_node->ptr() && index_node->ptr()->size() >= search_size) {
        result = index_node->ptr();
        break;
      }
      ++index_node;
    }
  }

  // Now walk toward smaller blocks that still satisfy search_size
  while (result) {
    FreeHeader* smaller = result->smaller_free(pool_base);
    if (smaller && smaller->size() >= search_size) {
      result = smaller;
    } else {
      break;
    }
  }

  return result;
}

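// Worked example (assuming min_index is 8, i.e. min_index_pow == 3): for a
// 100-byte request, 100 >> 4 == 6, which takes three shifts to reach zero, so
// node_index() returns 3 -- the bucket covering sizes 64 through 127.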
unsigned int
FreeIndex::node_index(size_t size)
{
  // Use shifting to perform log base 2 of size
  //   start by using min + 1 (+1 because min is a power of 2 which is already
  //   one bit)
  size_t size_copy = size >> (min_index_pow + 1);
  unsigned int index = 0;
  unsigned int max_idx = max_index_pow - min_index_pow;
  while (size_copy && (index < max_idx)) {
    ++index;
    size_copy = size_copy >> 1;
  }
  return index;
}

#ifdef VALIDATE_MEMORY_POOL
void
FreeIndex::validate_index(FreeIndex& index, unsigned char* base, bool log)
{
  if (log) {
    FreeIndexNode* node = index.nodes_;
    while (node < index.nodes_ + index.size_) {
      if (node->ptr()) {
        ACE_OS::printf("  IND[%4d] -> %4d\n", node->size(), node->ptr()->size());
      } else {
        ACE_OS::printf("  IND[%4d] -> NULL\n", node->size());
      }
      ++node;
    }
  }

  // Validate searches of each size
  for (size_t size = min_index; size <= max_index; size *= 2) {
    // Find size or larger
    FreeHeader* size_or_larger = index.find(size, base);
    if (size_or_larger) {
      TEST_CHECK(size_or_larger->size() >= size);
    }
  }

  // Validate each node points to a free block of the proper size
  for (FreeIndexNode* node = index.nodes_; node < index.nodes_ + index.size_; ++node) {
    FreeHeader* block = node->ptr();
    if (block) {
      // node should point to a free block of the proper size
      TEST_CHECK(node->contains(block->size()));

      FreeHeader* smaller = block;
      while ((smaller = smaller->smaller_free(base))) {
        // Anything smaller should be too small for this node
        TEST_CHECK(smaller->size() < node->size());
      }
    }
  }
}
#endif

MemoryPool::MemoryPool(unsigned int pool_size, size_t granularity)
: granularity_(align(granularity, 8))
, min_alloc_size_(align(min_free_size - sizeof(AllocHeader), granularity_))
, pool_size_(align(pool_size, granularity_))
, pool_ptr_(new unsigned char[pool_size_])
, largest_free_(NULL)
, free_index_(largest_free_)
{
  AllocHeader* the_pool = new (pool_ptr_) AllocHeader();
  FreeHeader* first_free = reinterpret_cast<FreeHeader*>(the_pool);
  first_free->init_free_block(static_cast<unsigned int>(pool_size_));
  largest_free_ = first_free;
  free_index_.init(first_free);
  lwm_free_bytes_ = largest_free_->size();
#if defined(WITH_VALGRIND)
  VALGRIND_MAKE_MEM_NOACCESS(pool_ptr_, pool_size_);
  VALGRIND_CREATE_MEMPOOL(pool_ptr_, 0, false);
#endif
}

MemoryPool::~MemoryPool()
{
#ifndef OPENDDS_SAFETY_PROFILE
  delete [] pool_ptr_;
#endif
}

size_t
MemoryPool::lwm_free_bytes() const
{
  return lwm_free_bytes_;
}

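// pool_alloc(): round the request up to the pool granularity (and the minimum
// allocation size), look up a suitable free block through the free index,
// carve the allocation out of it, then update the low-water mark of free bytes.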
void*
MemoryPool::pool_alloc(size_t size)
{
#if defined(WITH_VALGRIND)
  VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
#endif

  // Pointer to return
  unsigned char* block = NULL;

  // Round up to the pool granularity (a multiple of 8 bytes)
  size_t aligned_size = align(size, granularity_);

  if (aligned_size < min_alloc_size_) {
    aligned_size = min_alloc_size_;
  }

  // The block to allocate from
  FreeHeader* block_to_alloc = free_index_.find(aligned_size, pool_ptr_);

  if (block_to_alloc) {
    block = allocate(block_to_alloc, aligned_size);
  }

  // Update the low-water mark of free bytes
  size_t largest_free_bytes = largest_free_ ? largest_free_->size() : 0;
  if (largest_free_bytes < lwm_free_bytes_) {
    lwm_free_bytes_ = largest_free_bytes;
  }

#ifdef VALIDATE_MEMORY_POOL
  validate_pool(*this, false);
#endif

#if defined(WITH_VALGRIND)
  VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
  VALGRIND_MEMPOOL_ALLOC(pool_ptr_, block, size);
#endif

  return block;
}

bool
MemoryPool::pool_free(void* ptr)
{
  bool freed = false;
  if (ptr && includes(ptr)) {
#if defined(WITH_VALGRIND)
    VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
#endif

    FreeHeader* header = reinterpret_cast<FreeHeader*>(
        reinterpret_cast<AllocHeader*>(ptr) - 1);

    // Mark the header as free
    header->set_free();

    join_free_allocs(header);

#ifdef VALIDATE_MEMORY_POOL
    validate_pool(*this, false);
#endif

    freed = true;

#if defined(WITH_VALGRIND)
    VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
    VALGRIND_MEMPOOL_FREE(pool_ptr_, ptr);
#endif
  }

  return freed;
}

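// Coalescing: absorb a free block to the right into freed, then let a free
// block to the left absorb the result; whichever block survives is reinserted
// into the size-ordered free list and the free index.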
void
MemoryPool::join_free_allocs(FreeHeader* freed)
{
  // If the adjacent block to the right is also free, absorb it
  if (joinable_next(freed)) {
    FreeHeader* next_free = reinterpret_cast<FreeHeader*>(freed->next_adjacent());
    remove_free_alloc(next_free);
    freed->join_next();
    // Adjust prev_size of the following block
    AllocHeader* next = freed->next_adjacent();
    if (includes(next)) {
      next->set_prev_size(freed->size());
    }
  }
  // If the adjacent block to the left is free, let it absorb freed
  if (joinable_prev(freed)) {
    FreeHeader* prev_free = reinterpret_cast<FreeHeader*>(freed->prev_adjacent());
    remove_free_alloc(prev_free);
    // Join prev with freed
    prev_free->join_next();
    insert_free_alloc(prev_free);
    // Adjust prev_size of the following block
    AllocHeader* next = prev_free->next_adjacent();
    if (includes(next)) {
      next->set_prev_size(prev_free->size());
    }
  } else {
    insert_free_alloc(freed);
  }
}

void
MemoryPool::remove_free_alloc(FreeHeader* block_to_alloc)
{
  FreeHeader* smaller = block_to_alloc->smaller_free(pool_ptr_);
  FreeHeader* larger = block_to_alloc->larger_free(pool_ptr_);

  block_to_alloc->set_smaller_free(NULL, NULL);
  block_to_alloc->set_larger_free(NULL, NULL);

  // If this was the largest free alloc
  if (block_to_alloc == largest_free_) {
    // It no longer is
    largest_free_ = smaller;
  }

  if (larger) {
    larger->set_smaller_free(smaller, pool_ptr_);
  }

  if (smaller) {
    smaller->set_larger_free(larger, pool_ptr_);
  }

  // Remove from free index
  free_index_.remove(block_to_alloc, larger);
}

void
MemoryPool::insert_free_alloc(FreeHeader* freed)
{
  // Find a free alloc of freed's size or larger
  FreeHeader* alloc = free_index_.find(freed->size(), pool_ptr_);
  // If found
  if (alloc) {
    FreeHeader* smaller = alloc->smaller_free(pool_ptr_);

    // Insert into list
    freed->set_larger_free(alloc, pool_ptr_);
    alloc->set_smaller_free(freed, pool_ptr_);
    if (smaller) {
      smaller->set_larger_free(freed, pool_ptr_);
      freed->set_smaller_free(smaller, pool_ptr_);
    }
  // Else freed is now the largest free alloc
  } else {
    if (freed != largest_free_) {
      freed->set_smaller_free(largest_free_, pool_ptr_);
      if (largest_free_) {
        largest_free_->set_larger_free(freed, pool_ptr_);
      }
      largest_free_ = freed;
    }
  }

  // Add to the free index, replacing the indexed block if appropriate
  free_index_.add(freed);
}

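// allocate() carves alloc_size bytes from the tail of free_block.  If the
// remainder would be too small to hold another header plus a minimum
// allocation, the whole free block is handed out instead of being split.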
unsigned char*
MemoryPool::allocate(FreeHeader* free_block, size_t alloc_size)
{
  size_t free_block_size = free_block->size();
  size_t remainder = free_block_size - alloc_size;

  // May not be enough room for another allocation
  if (remainder < min_free_size) {
    alloc_size = free_block_size; // use it all
    remainder = 0;
  }

  // If we are NOT allocating the whole block
  if (remainder) {
    // Account for header here - won't overflow due to check, above
    remainder -= sizeof(AllocHeader);

    // Adjust current adjacent block (after free block)
    AllocHeader* next_adjacent = free_block->next_adjacent();
    if (includes(next_adjacent)) {
      next_adjacent->set_prev_size(static_cast<int>(alloc_size));
    }

    // Always remove, resize, and reinsert to make sure free list and free
    // index are in sync
    remove_free_alloc(free_block);
    free_block->set_size(remainder);
    insert_free_alloc(free_block);

    // After resize, can use next_adjacent() to safely get to the end of the
    // resized block.
    // Taking free memory and allocating, so invoke constructor
    AllocHeader* alloc_block = new(free_block->next_adjacent()) AllocHeader();

    // Allocate adjacent block (at end of existing block)
    alloc_block->set_size(alloc_size);
    alloc_block->set_allocated();
    alloc_block->set_prev_size(static_cast<int>(remainder));
    return alloc_block->ptr();
  // Else we ARE allocating the whole block
  } else {
    free_block->set_allocated();
    // remove free_block from free list
    remove_free_alloc(free_block);
    return free_block->ptr();
  }
}

bool
MemoryPool::joinable_next(FreeHeader* freed)
{
  AllocHeader* next_alloc = freed->next_adjacent();
  return freed->is_free() &&
         includes(next_alloc) &&
         next_alloc->is_free();
}

bool
MemoryPool::joinable_prev(FreeHeader* freed)
{
  AllocHeader* prev_alloc = freed->prev_adjacent();
  return freed->is_free() &&
         includes(prev_alloc) &&
         prev_alloc->is_free();
}

#ifdef VALIDATE_MEMORY_POOL
void
MemoryPool::validate_pool(MemoryPool& pool, bool log) {
  AllocHeader* prev = 0;
  size_t allocated_bytes = 0;
  size_t free_bytes = 0;
  size_t oh_bytes = 0;
  size_t free_count = 0;
  unsigned char* pool_end = pool.pool_ptr_ + pool.pool_size_;
  bool prev_was_free = false;
  size_t index = 0;

  typedef std::map<FreeHeader*, int> FreeMap;
  FreeMap free_map;
  // Record the positional index of every free block
  AllocHeader* alloc = reinterpret_cast<AllocHeader*>(pool.pool_ptr_);
  while (pool.includes(alloc)) {
    FreeHeader* free_header = alloc->is_free() ?
          reinterpret_cast<FreeHeader*>(alloc) : NULL;
    if (free_header) {
      free_map[free_header] = index;
    }
    alloc = alloc->next_adjacent();
    ++index;
  }

  index = 0;
  if (log) {
    ACE_OS::printf("Pool ptr %zx end %zx\n", (unsigned long)pool.pool_ptr_,
           (unsigned long)pool_end);
  }

  // Check all allocs in positional order and not overlapping
  alloc = reinterpret_cast<AllocHeader*>(pool.pool_ptr_);
  while (pool.includes(alloc)) {
    if (log) {

      int smlr_index = -1;
      int lrgr_index = -1;
      char lrgr_buff[32];
      char smlr_buff[32];

      FreeHeader* free_header = alloc->is_free() ?
            reinterpret_cast<FreeHeader*>(alloc) : NULL;
      if (free_header) {
        FreeMap::const_iterator found;
        found = free_map.find(free_header->smaller_free(pool.pool_ptr_));
        if (found != free_map.end()) {
          smlr_index = found->second;
          snprintf(smlr_buff, 32, "[%2d]", smlr_index); // preprocessed out
        }
        found = free_map.find(free_header->larger_free(pool.pool_ptr_));
        if (found != free_map.end()) {
          lrgr_index = found->second;
          snprintf(lrgr_buff, 32, "[%2d]", lrgr_index); // preprocessed out
        }
      }
      ACE_OS::printf(
        "Alloc[%zu] %s at %zx ptr %zx lg %s sm %s size %d psize %d\n",
        index++,
        alloc->is_free() ?
        (alloc == pool.largest_free_ ? "FREE!" : "free ")  : "     ",
        (unsigned long)alloc,
        (unsigned long)alloc->ptr(),
        lrgr_index >= 0 ? lrgr_buff : "[  ]",
        smlr_index >= 0 ? smlr_buff : "[  ]",
        alloc->size(),
        alloc->prev_size()
      );
    }

    TEST_CHECK(alloc->size());
    if (prev) {
      TEST_CHECK(prev->next_adjacent() == alloc);
      TEST_CHECK(alloc->prev_adjacent() == prev);
      // Validate these are not consecutive free blocks
      TEST_CHECK(!(prev_was_free && alloc->is_free()));
    }

    if (!alloc->is_free()) {
      allocated_bytes += alloc->size();
      prev_was_free = false;
    } else {
      free_bytes += alloc->size();
      prev_was_free = true;
    }
    oh_bytes += sizeof(AllocHeader);
    prev = alloc;
    alloc = alloc->next_adjacent();
  }
  TEST_CHECK((unsigned char*)alloc == pool_end);

  TEST_CHECK(allocated_bytes + free_bytes + oh_bytes == pool.pool_size_);

  FreeIndex::validate_index(pool.free_index_, pool.pool_ptr_, log);

  size_t prev_size = 0;
  size_t free_bytes_in_list = 0;
  FreeHeader* free_alloc = NULL;
  FreeHeader* prev_free = NULL;

  // Check all free blocks in size order
  for (free_alloc = pool.largest_free_;
       free_alloc;
       free_alloc = free_alloc->smaller_free(pool.pool_ptr_)) {
    // Should be marked free
    TEST_CHECK(free_alloc->is_free());
    // Check for infinite loop
    TEST_CHECK(++free_count < 10000);

    // Sum bytes found
    free_bytes_in_list += free_alloc->size();

    // If not the first alloc
    if (prev_size) {
      TEST_CHECK(free_alloc->size() <= prev_size);
      TEST_CHECK(free_alloc->size() > 0);
    }
    prev_size = free_alloc->size();
    prev_free = free_alloc;
  }

  TEST_CHECK(free_bytes == free_bytes_in_list);

  // Try again from smallest to largest
  if (prev_free) {
    free_bytes_in_list = 0;

    for (free_alloc = prev_free;
         free_alloc;
         free_alloc = free_alloc->larger_free(pool.pool_ptr_)) {
      // Should be marked free
      TEST_CHECK(free_alloc->is_free());

      // Sum bytes found
      free_bytes_in_list += free_alloc->size();

      // If not the first alloc
      if (free_alloc != prev_free) {
        TEST_CHECK(free_alloc->size() >= prev_size);
        TEST_CHECK(free_alloc->size() > 0);
      }
      prev_size = free_alloc->size();
    }
    TEST_CHECK(free_bytes == free_bytes_in_list);
  }
}
#endif

}}
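
A minimal usage sketch (not part of the file above; the include path and pool
parameters are illustrative assumptions): it allocates two blocks from a
MemoryPool, reports the low-water mark of free bytes, and frees the blocks.

#include "dds/DCPS/MemoryPool.h"  // assumed include path for MemoryPool.h
#include "ace/OS_NS_stdio.h"

int main()
{
  // 1 MiB pool with 8-byte granularity (see the MemoryPool constructor above).
  OpenDDS::DCPS::MemoryPool pool(1024 * 1024, 8);

  void* a = pool.pool_alloc(100);  // rounded up to the pool granularity
  void* b = pool.pool_alloc(256);

  ACE_OS::printf("lwm free bytes: %lu\n",
                 static_cast<unsigned long>(pool.lwm_free_bytes()));

  pool.pool_free(b);  // freed blocks are coalesced with free neighbors
  pool.pool_free(a);
  return 0;
}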
