OpenDDS  Snapshot(2023/04/28-20:55)
MemoryPool.cpp
Go to the documentation of this file.
1 /*
2  *
3  *
4  * Distributed under the OpenDDS License.
5  * See: http://www.opendds.org/license.html
6  */
7 
8 #include "DCPS/DdsDcps_pch.h" ////Only the _pch include should start with DCPS/
9 #include "MemoryPool.h"
10 #include "PoolAllocator.h"
11 #include "ace/Log_Msg.h"
12 #include "ace/OS_NS_stdio.h"
13 #include <stdexcept>
14 #include <limits>
15 #include <map>
16 #include <cstring>
17 
18 #if defined(WITH_VALGRIND)
19 #include "valgrind/memcheck.h"
20 #endif
21 
// Validation-only assertion used by validate_pool()/validate_index():
// formats the failed condition with file/line, prints it, and throws.
// Wrapped in do { } while (0) so it expands to a single statement and is
// safe inside unbraced if/else; the unreachable `return;` that followed
// the throw has been removed.
#define TEST_CHECK(COND) \
  do { \
    if (!( COND )) { \
      char msg[1024]; \
      ACE_OS::snprintf(msg, 1024, "%s: FAILED at %s:%d", #COND, __FILE__, __LINE__); \
      ACE_OS::printf("%s\n", msg); \
      throw std::runtime_error(msg); \
    } \
  } while (0)
30 
32 
33 namespace OpenDDS { namespace DCPS {
34 
36 : alloc_size_(0)
37 , prev_size_(0)
38 {
39 }
40 
41 unsigned char*
43 {
44  const unsigned char* buff = reinterpret_cast<const unsigned char*>(this + 1);
45  return const_cast<unsigned char*>(buff);
46 }
47 
50  unsigned char* past_buffer_end = ptr() + size();
51  return reinterpret_cast<AllocHeader*>(past_buffer_end);
52 }
53 
56  AllocHeader* result = NULL;
57  if (prev_size_) {
58  unsigned char* self = reinterpret_cast<unsigned char*>(this);
59  unsigned char* prev_buffer_start = self - prev_size_;
60  unsigned char* past_alloc = prev_buffer_start - sizeof(AllocHeader);
61  result = reinterpret_cast<AllocHeader*>(past_alloc);
62  }
63  return result;
64 }
65 
66 void
68  set_allocated();
69  set_size(size);
70 }
71 
72 void
74 {
75  if (is_free()) {
76  size *= -1;
77  }
78  alloc_size_ = (int)size;
79 }
80 
81 void
83  // All unsigned, set_size will make negative if free (it is)
84  size_t next_size = this->next_adjacent()->size();
85  size_t joined_size = this->size() + next_size + sizeof(AllocHeader);
86  this->set_size(joined_size);
87 }
88 
89 void
90 FreeHeader::init_free_block(unsigned int pool_size)
91 {
92  alloc_size_ = static_cast<int>(pool_size - sizeof(AllocHeader));
93  prev_size_ = 0;
94  set_free();
95 }
96 
97 void
99 {
100  // If this is newly freed
101  if (!is_free()) {
102  alloc_size_ *= -1;
103  set_smaller_free(NULL, NULL);
104  set_larger_free(NULL, NULL);
105  }
106 }
107 FreeHeader*
108 FreeHeader::smaller_free(unsigned char* pool_base) const
109 {
110  FreeHeader* result = NULL;
111  if (offset_smaller_free_ != std::numeric_limits<size_t>::max()) {
112  result = reinterpret_cast<FreeHeader*>(pool_base + offset_smaller_free_);
113  }
114  return result;
115 }
116 
117 FreeHeader*
118 FreeHeader::larger_free(unsigned char* pool_base) const
119 {
120  FreeHeader* result = NULL;
121  if (offset_larger_free_ != std::numeric_limits<size_t>::max()) {
122  result = reinterpret_cast<FreeHeader*>(pool_base + offset_larger_free_);
123  }
124  return result;
125 }
126 
127 void
128 FreeHeader::set_smaller_free(FreeHeader* next, unsigned char* pool_base)
129 {
130  if (next) {
131  offset_smaller_free_ = reinterpret_cast<unsigned char*>(next) - pool_base;
132  } else {
133  offset_smaller_free_ = std::numeric_limits<size_t>::max();
134  }
135 }
136 
137 void
138 FreeHeader::set_larger_free(FreeHeader* prev, unsigned char* pool_base)
139 {
140  if (prev) {
141  offset_larger_free_ = reinterpret_cast<unsigned char*>(prev) - pool_base;
142  } else {
143  offset_larger_free_ = std::numeric_limits<size_t>::max();
144  }
145 }
146 
148 : size_(0)
149 , limit_(0)
150 , ptr_(0)
151 {
152 }
153 
154 void
155 FreeIndexNode::set_sizes(size_t size, size_t limit)
156 {
157  size_ = size;
158  limit_ = limit;
159 }
160 
162 : size_(0)
163 , largest_free_(largest_free)
164 {
165 }
166 
167 void
169 {
170  unsigned int index = node_index(freed->size());
171  FreeIndexNode* node = nodes_ + index;
172 
173  // If the node is empty, or if freed is smaller or equal to the node's alloc
174  if ((node->ptr() == NULL) || (node->ptr()->size() >= freed->size())) {
175  // Use this alloc in the index
176  node->set_ptr(freed);
177  }
178 }
179 
180 void
182 {
183  unsigned int index = node_index(free_block->size());
184  FreeIndexNode* node = nodes_ + index;
185 
186  // If the node points to the free block
187  if (node->ptr() == free_block) {
188  // If the larger can be used by this node
189  if (larger && node->contains(larger->size())) {
190  node->set_ptr(larger);
191  } else {
192  node->set_ptr(NULL);
193  }
194  }
195 }
196 
197 void
198 FreeIndex::init(FreeHeader* init_free_block)
199 {
200  size_t max = std::numeric_limits<size_t>::max();
201  for (size_t size = min_index; size <= max_index; size *= 2) {
202  nodes_[size_].set_sizes(size, (size == max_index) ? max : size*2);
203  ++size_;
204  }
205  add(init_free_block);
206 }
207 
208 FreeHeader*
209 FreeIndex::find(size_t search_size, unsigned char* pool_base)
210 {
211  unsigned int index = node_index(search_size);
212  FreeIndexNode* index_node = nodes_ + index;
213 
214  // Larger or equal to search_size
215  FreeHeader* result = NULL;
216  if (largest_free_ && (largest_free_->size() >= search_size)) {
217  result = largest_free_;
218 
219  // Look from here and larger
220  while (index_node < nodes_ + size_) {
221  if (index_node->ptr() && index_node->ptr()->size() >= search_size) {
222  result = index_node->ptr();
223  break;
224  }
225  ++index_node;
226  }
227  }
228 
229  // Now traverse, searching for smaller than result
230  while (result) {
231  FreeHeader* smaller = result->smaller_free(pool_base);
232  if (smaller && smaller->size() >= search_size) {
233  result = smaller;
234  } else {
235  break;
236  }
237  }
238 
239  return result;
240 }
241 
242 unsigned int
244 {
245  // Use shifting to perform log base 2 of size
246  // start by using min + 1 (+1 because min is a power of 2 whch is already
247  // one bit)
248  size_t size_copy = size >> (min_index_pow + 1);
249  unsigned int index = 0;
250  unsigned int max_idx = max_index_pow - min_index_pow;
251  while (size_copy && (index < max_idx)) {
252  ++index;
253  size_copy = size_copy >> 1;
254  }
255  return index;
256 }
257 
#ifdef VALIDATE_MEMORY_POOL
// Debug-only consistency check of the free index: optionally dumps each
// node, then asserts (via TEST_CHECK, which throws on failure) that
// find() results fit, that each node references a block in its size
// range, and that the smaller-free chain is ordered.
void
FreeIndex::validate_index(FreeIndex& index, unsigned char* base, bool log)
{
  if (log) {
    // Dump each index node and the size of the block it references.
    // NOTE(review): %4d with unsigned size values -- assumes they fit in
    // int; confirm against the header's return types.
    FreeIndexNode* node = index.nodes_;
    while (node < index.nodes_ + index.size_) {
      if (node->ptr()) {
        ACE_OS::printf(" IND[%4d] -> %4d\n", node->size(), node->ptr()->size());
      } else {
        ACE_OS::printf(" IND[%4d] -> NULL\n", node->size());
      }
      ++node;
    }
  }

  // Validate searches of each size
  for (size_t size = min_index; size <= max_index; size *= 2) {
    // Find size or larger
    FreeHeader* size_or_larger = index.find(size, base);
    if (size_or_larger) {
      TEST_CHECK(size_or_larger->size() >= size);
    }
  }

  // Validate each node points to a free block of the proper size;
  for (FreeIndexNode* node = index.nodes_; node < index.nodes_ + index.size_; ++node) {
    FreeHeader* block = node->ptr();
    if (block) {
      // node should point to a free block of the proper size;
      TEST_CHECK(node->contains(block->size()));

      FreeHeader* smaller = block;
      while ((smaller = smaller->smaller_free(base))) {
        // Anything smaller should be too small for this node
        TEST_CHECK(smaller->size() < node->size());
      }
    }
  }
}
#endif
299 
300 MemoryPool::MemoryPool(unsigned int pool_size, size_t granularity)
301 : granularity_(align(granularity, 8))
302 , min_alloc_size_(align(min_free_size - sizeof(AllocHeader), granularity_))
303 , pool_size_(align(pool_size, granularity_))
304 , pool_ptr_(new unsigned char[pool_size_])
305 , largest_free_(NULL)
306 , free_index_(largest_free_)
307 {
308  AllocHeader* the_pool = new (pool_ptr_) AllocHeader();
309  FreeHeader* first_free = reinterpret_cast<FreeHeader*>(the_pool);
310  first_free->init_free_block(static_cast<unsigned int>(pool_size_));
311  largest_free_ = first_free;
312  free_index_.init(first_free);
314 #if defined(WITH_VALGRIND)
315  VALGRIND_MAKE_MEM_NOACCESS(pool_ptr_, pool_size_);
316  VALGRIND_CREATE_MEMPOOL(pool_ptr_, 0, false);
317 #endif
318 }
319 
321 {
322 #ifndef OPENDDS_SAFETY_PROFILE
323  delete [] pool_ptr_;
324 #endif
325 }
326 
327 size_t
329 {
330  return lwm_free_bytes_;
331 }
332 
333 void*
335 {
336 #if defined(WITH_VALGRIND)
337  VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
338 #endif
339 
340  // Pointer to return
341  unsigned char* block = NULL;
342 
343  // Round up to 8-byte boundary
344  size_t aligned_size = align(size, granularity_);
345 
346  if (aligned_size < min_alloc_size_) {
347  aligned_size = min_alloc_size_;
348  }
349 
350  // The block to allocate from
351  FreeHeader* block_to_alloc = free_index_.find(aligned_size, pool_ptr_);
352 
353  if (block_to_alloc) {
354  block = allocate(block_to_alloc, aligned_size);
355  }
356 
357  // Update lwm
358  size_t largest_free_bytes = largest_free_ ? largest_free_->size() : 0;
359  if (largest_free_bytes < lwm_free_bytes_) {
360  lwm_free_bytes_ = largest_free_bytes;
361  }
362 
363 #ifdef VALIDATE_MEMORY_POOL
364  validate_pool(*this, false);
365 #endif
366 
367 #if defined(WITH_VALGRIND)
368  VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
369  VALGRIND_MEMPOOL_ALLOC(pool_ptr_, block, size);
370 #endif
371 
372  return block;
373 }
374 
375 bool
377 {
378  bool freed = false;
379  if (ptr && includes(ptr)) {
380 #if defined(WITH_VALGRIND)
381  VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
382 #endif
383 
384  FreeHeader* header = reinterpret_cast<FreeHeader*>(
385  reinterpret_cast<AllocHeader*>(ptr) - 1);
386 
387  // Free header
388  header->set_free();
389 
390  join_free_allocs(header);
391 
392 #ifdef VALIDATE_MEMORY_POOL
393  validate_pool(*this, false);
394 #endif
395 
396  freed = true;
397 
398 #if defined(WITH_VALGRIND)
399  VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(pool_ptr_, pool_size_);
400  VALGRIND_MEMPOOL_FREE(pool_ptr_, ptr);
401 #endif
402  }
403 
404  return freed;
405 }
406 
407 void
409 {
410  // Check adjacent
411  if (joinable_next(freed)) {
412  FreeHeader* next_free = reinterpret_cast<FreeHeader*>(freed->next_adjacent());
413  remove_free_alloc(next_free);
414  freed->join_next();
415  // Adjust psize of adjacent
416  AllocHeader* next = freed->next_adjacent();
417  if (includes(next)) {
418  next->set_prev_size(freed->size());
419  }
420  }
421  if (joinable_prev(freed)) {
422  FreeHeader* prev_free = reinterpret_cast<FreeHeader*>(freed->prev_adjacent());
423  remove_free_alloc(prev_free);
424  // Join prev with freed
425  prev_free->join_next();
426  insert_free_alloc(prev_free);
427  // Adjust psize of adjacent
428  AllocHeader* next = prev_free->next_adjacent();
429  if (includes(next)) {
430  next->set_prev_size(prev_free->size());
431  }
432  } else {
433  insert_free_alloc(freed);
434  }
435 }
436 
437 void
439 {
440  FreeHeader* smaller = block_to_alloc->smaller_free(pool_ptr_);
441  FreeHeader* larger = block_to_alloc->larger_free(pool_ptr_);
442 
443  block_to_alloc->set_smaller_free(NULL, NULL);
444  block_to_alloc->set_larger_free(NULL, NULL);
445 
446  // If this was the largest free alloc
447  if (block_to_alloc == largest_free_) {
448  // It no longer is
449  largest_free_ = smaller;
450  }
451 
452  if (larger) {
453  larger->set_smaller_free(smaller, pool_ptr_);
454  }
455 
456  if (smaller) {
457  smaller->set_larger_free(larger, pool_ptr_);
458  }
459 
460  // Remove from free index
461  free_index_.remove(block_to_alloc, larger);
462 }
463 
464 void
466 {
467  // Find free alloc freed size or larger
468  FreeHeader* alloc = free_index_.find(freed->size(), pool_ptr_);
469  // If found
470  if (alloc) {
471  FreeHeader* smaller = alloc->smaller_free(pool_ptr_);
472 
473  // Insert into list
474  freed->set_larger_free(alloc, pool_ptr_);
475  alloc->set_smaller_free(freed, pool_ptr_);
476  if (smaller) {
477  smaller->set_larger_free(freed, pool_ptr_);
478  freed->set_smaller_free(smaller, pool_ptr_);
479  }
480  // Else freed the largest alloc
481  } else {
482  if (freed != largest_free_) {
484  if (largest_free_) {
486  }
487  largest_free_ = freed;
488  }
489  }
490 
491  // Insert and replace alloc if necessary
492  free_index_.add(freed);
493 }
494 
// Carve alloc_size bytes out of free_block.  If the leftover would be too
// small to stand alone as a free block, the whole block is handed out;
// otherwise the block is split: the front remains free (shrunk) and the
// allocation is placed at its tail.  Returns the user pointer.
// NOTE: the remove/resize/reinsert ordering below keeps the free list and
// free index in sync and must not be reordered.
unsigned char*
MemoryPool::allocate(FreeHeader* free_block, size_t alloc_size)
{
  size_t free_block_size = free_block->size();
  size_t remainder = free_block_size - alloc_size;

  // May not be enough room for another allocation
  if (remainder < min_free_size) {
    alloc_size = free_block_size; // use it all
    remainder = 0;
  }

  // If we are NOT allocating the whole block
  if (remainder) {
    // Account for header here - won't overflow due to check, above
    remainder -= sizeof(AllocHeader);

    // Adjust current adjacent block (after free block)
    AllocHeader* next_adjacent = free_block->next_adjacent();
    if (includes(next_adjacent)) {
      // The block after us will now be preceded by the tail allocation.
      next_adjacent->set_prev_size(static_cast<int>(alloc_size));
    }

    // Always remove, resize, and reinsert to make sure free list and free
    // index are in sync
    remove_free_alloc(free_block);
    free_block->set_size(remainder);
    insert_free_alloc(free_block);

    // After resize, can use next_adjacent() to safely get to the end of the
    // resized block.
    // Taking free memory and allocating, so invoke constructor
    AllocHeader* alloc_block = new(free_block->next_adjacent()) AllocHeader();

    // Allocate adjacent block (at end of existing block)
    alloc_block->set_size(alloc_size);
    alloc_block->set_allocated();
    alloc_block->set_prev_size(static_cast<int>(remainder));
    return alloc_block->ptr();
  // Else we ARE allocating the whole block
  } else {
    free_block->set_allocated();
    // remove free_block from free list
    remove_free_alloc(free_block);
    return free_block->ptr();
  }
}
542 
543 bool
545 {
546  AllocHeader* next_alloc = freed->next_adjacent();
547  return freed->is_free() &&
548  includes(next_alloc) &&
549  next_alloc->is_free();
550 }
551 
552 bool
554 {
555  AllocHeader* prev_alloc = freed->prev_adjacent();
556  return freed->is_free() &&
557  includes(prev_alloc) &&
558  prev_alloc->is_free();
559 }
560 
#ifdef VALIDATE_MEMORY_POOL
// Debug-only whole-pool consistency check: walks every header in address
// order (sizes, prev_size back-links, no adjacent free blocks), reconciles
// byte totals, validates the free index, then walks the free list in both
// size orders.  TEST_CHECK throws on any violation.
void
MemoryPool::validate_pool(MemoryPool& pool, bool log) {
  AllocHeader* prev = 0;
  size_t allocated_bytes = 0;
  size_t free_bytes = 0;
  size_t oh_bytes = 0;
  size_t free_count = 0;
  unsigned char* pool_end = pool.pool_ptr_ + pool.pool_size_;
  bool prev_was_free = false;  // initialized (only read once prev != 0)
  size_t index = 0;

  typedef std::map<FreeHeader*, int> FreeMap;
  FreeMap free_map;
  // Gather all free indices
  AllocHeader* alloc = reinterpret_cast<AllocHeader*>(pool.pool_ptr_);
  while (pool.includes(alloc)) {
    FreeHeader* free_header = alloc->is_free() ?
      reinterpret_cast<FreeHeader*>(alloc) : NULL;
    if (free_header) {
      free_map[free_header] = index;
    }
    alloc = alloc->next_adjacent();
    ++index;
  }

  index = 0;
  if (log) {
    ACE_OS::printf("Pool ptr %zx end %zx\n", (unsigned long)pool.pool_ptr_,
                   (unsigned long)pool_end);
  }

  // Check all allocs in positional order and not overlapping
  alloc = reinterpret_cast<AllocHeader*>(pool.pool_ptr_);
  while (pool.includes(alloc)) {
    if (log) {

      int smlr_index = -1;
      int lrgr_index = -1;
      char lrgr_buff[32];
      char smlr_buff[32];

      FreeHeader* free_header = alloc->is_free() ?
        reinterpret_cast<FreeHeader*>(alloc) : NULL;
      if (free_header) {
        FreeMap::const_iterator found;
        found = free_map.find(free_header->smaller_free(pool.pool_ptr_));
        if (found != free_map.end()) {
          smlr_index = found->second;
          // NOTE(review): ACE_OS:: prefix restored -- the doc extraction
          // flagged these calls "preprocessed out"; confirm.
          ACE_OS::snprintf(smlr_buff, 32, "[%2d]", smlr_index);
        }
        found = free_map.find(free_header->larger_free(pool.pool_ptr_));
        if (found != free_map.end()) {
          lrgr_index = found->second;
          ACE_OS::snprintf(lrgr_buff, 32, "[%2d]", lrgr_index);
        }
      }
      // NOTE(review): the ACE_OS::printf( line was dropped by the doc
      // extraction and is restored here -- confirm against upstream.
      ACE_OS::printf(
        "Alloc[%zu] %s at %zx ptr %zx lg %s sm %s size %d psize %d\n",
        index++,
        alloc->is_free() ?
        (alloc == pool.largest_free_ ? "FREE!" : "free ") : "     ",
        (unsigned long)alloc,
        (unsigned long)alloc->ptr(),
        lrgr_index >= 0 ? lrgr_buff : "[  ]",
        smlr_index >= 0 ? smlr_buff : "[  ]",
        alloc->size(),
        alloc->prev_size()
      );
    }

    TEST_CHECK(alloc->size());
    if (prev) {
      TEST_CHECK(prev->next_adjacent() == alloc);
      TEST_CHECK(alloc->prev_adjacent() == prev);
      // Validate these are not consecutive free blocks
      TEST_CHECK(!(prev_was_free && alloc->is_free()));
    }

    if (!alloc->is_free()) {
      allocated_bytes += alloc->size();
      prev_was_free = false;
    } else {
      free_bytes += alloc->size();
      prev_was_free = true;
    }
    oh_bytes += sizeof(AllocHeader);
    prev = alloc;
    alloc = alloc->next_adjacent();
  }
  TEST_CHECK((unsigned char*)alloc == pool_end);

  TEST_CHECK(allocated_bytes + free_bytes + oh_bytes == pool.pool_size_);

  FreeIndex::validate_index(pool.free_index_, pool.pool_ptr_, log);

  size_t prev_size = 0;
  size_t free_bytes_in_list = 0;
  FreeHeader* free_alloc = NULL;
  FreeHeader* prev_free = NULL;

  // Check all free blocks in size order
  for (free_alloc = pool.largest_free_;
       free_alloc;
       free_alloc = free_alloc->smaller_free(pool.pool_ptr_)) {
    // Should be marked free
    TEST_CHECK(free_alloc->is_free());
    // Check for infinite loop
    TEST_CHECK(++free_count < 10000);

    // Sum bytes found
    free_bytes_in_list += free_alloc->size();

    // If not the first alloc
    if (prev_size) {
      TEST_CHECK(free_alloc->size() <= prev_size);
      TEST_CHECK(free_alloc->size() > 0);
    }
    prev_size = free_alloc->size();
    prev_free = free_alloc;
  }

  TEST_CHECK(free_bytes == free_bytes_in_list);

  // Try again from smallest to largest
  if (prev_free) {
    free_bytes_in_list = 0;

    for (free_alloc = prev_free;
         free_alloc;
         free_alloc = free_alloc->larger_free(pool.pool_ptr_)) {
      // Should be marked free
      TEST_CHECK(free_alloc->is_free());

      // Sum bytes found
      free_bytes_in_list += free_alloc->size();

      // If not the first alloc
      if (free_alloc != prev_free) {
        TEST_CHECK(free_alloc->size() >= prev_size);
        TEST_CHECK(free_alloc->size() > 0);
      }
      prev_size = free_alloc->size();
    }
    TEST_CHECK(free_bytes == free_bytes_in_list);
  }
}
#endif
709 
710 }}
711 
void add(FreeHeader *free_block)
Definition: MemoryPool.cpp:168
int alloc_size_
Size of my buffer, negative if free, positive if alloc.
Definition: MemoryPool.h:60
void set_sizes(size_t size, size_t limit)
Definition: MemoryPool.cpp:155
void insert_free_alloc(FreeHeader *block_freed)
Definition: MemoryPool.cpp:465
bool pool_free(void *ptr)
Definition: MemoryPool.cpp:376
void set_larger_free(FreeHeader *prev, unsigned char *pool_base)
Definition: MemoryPool.cpp:138
FreeHeader * larger_free(unsigned char *pool_base) const
Definition: MemoryPool.cpp:118
size_t size_
Number of index nodes.
Definition: MemoryPool.h:152
unsigned char * pool_ptr_
Pointer to pool.
Definition: MemoryPool.h:194
FreeIndex free_index_
Index of free nodex.
Definition: MemoryPool.h:197
int snprintf(char *buf, size_t maxlen, const char *format,...) ACE_GCC_FORMAT_ATTRIBUTE(printf
void remove_free_alloc(FreeHeader *block_to_alloc)
Definition: MemoryPool.cpp:438
#define TEST_CHECK(COND)
Definition: MemoryPool.cpp:22
static unsigned int node_index(size_t size)
Definition: MemoryPool.cpp:243
const size_t min_alloc_size_
Aligned minimum allocation size.
Definition: MemoryPool.h:191
bool includes(void *ptr) const
Definition: MemoryPool.h:169
size_t lwm_free_bytes_
Low water mark of available bytes.
Definition: MemoryPool.h:193
void join_free_allocs(FreeHeader *block_freed)
Definition: MemoryPool.cpp:408
bool joinable_prev(FreeHeader *freed)
Definition: MemoryPool.cpp:553
void set_ptr(FreeHeader *ptr)
Definition: MemoryPool.h:98
void init(FreeHeader *init_free_block)
Definition: MemoryPool.cpp:198
FreeHeader *& largest_free_
Memory pool&#39;s pointer to largest free block.
Definition: MemoryPool.h:153
size_t size_
size of buffer
Definition: MemoryPool.h:111
FreeHeader * largest_free_
Pointer to largest free index.
Definition: MemoryPool.h:196
FreeIndex(FreeHeader *&largest_free)
Definition: MemoryPool.cpp:161
FreeIndexNode nodes_[max_index_pow - min_index_pow+1]
Index nodes.
Definition: MemoryPool.h:154
Christopher Diggins *renamed files *fixing compilation errors *adding Visual C project file *removed make Max Lybbert *removed references to missing and unused header
Definition: CHANGELOG.txt:8
unsigned int size() const
Definition: MemoryPool.h:108
const size_t granularity_
Configured granularity.
Definition: MemoryPool.h:190
const size_t pool_size_
Configured pool size.
Definition: MemoryPool.h:192
size_t limit_
upper_limit of buffer size (one too large)
Definition: MemoryPool.h:112
size_t size_
static size_t align(size_t size, size_t granularity)
Definition: MemoryPool.h:184
unsigned char * ptr() const
Definition: MemoryPool.cpp:42
void set_smaller_free(FreeHeader *next, unsigned char *pool_base)
Definition: MemoryPool.cpp:128
bool joinable_next(FreeHeader *freed)
Definition: MemoryPool.cpp:544
OpenDDS_Dcps_Export void align(size_t &value, size_t by)
Align "value" by "by" if it&#39;s not already.
Definition: Serializer.inl:23
AllocHeader * prev_adjacent()
Definition: MemoryPool.cpp:55
int prev_size_
Size of previous buffer, or 0 if first, never negative.
Definition: MemoryPool.h:61
void remove(FreeHeader *free_block, FreeHeader *next_largest)
Definition: MemoryPool.cpp:181
void set_prev_size(int size)
Definition: MemoryPool.h:51
int printf(const char *format,...) ACE_GCC_FORMAT_ATTRIBUTE(printf
unsigned char * allocate(FreeHeader *free_block, size_t alloc_size)
Definition: MemoryPool.cpp:496
#define OPENDDS_END_VERSIONED_NAMESPACE_DECL
unsigned int prev_size() const
Definition: MemoryPool.h:32
unsigned int size() const
Definition: MemoryPool.h:30
AllocHeader * next_adjacent()
Definition: MemoryPool.cpp:49
MemoryPool(unsigned int pool_size, size_t granularity=8)
Definition: MemoryPool.cpp:300
void init_free_block(unsigned int pool_size)
Definition: MemoryPool.cpp:90
FreeHeader * smaller_free(unsigned char *pool_base) const
Definition: MemoryPool.cpp:108
size_t lwm_free_bytes() const
Definition: MemoryPool.cpp:328
FreeHeader * find(size_t size, unsigned char *base)
Definition: MemoryPool.cpp:209
The Internal API and Implementation of OpenDDS.
Definition: AddressCache.h:28
void allocate(size_t size)
Definition: MemoryPool.cpp:67
void set_size(size_t size)
Definition: MemoryPool.cpp:73
void * pool_alloc(size_t size)
Definition: MemoryPool.cpp:334