src/share/vm/memory/heap.cpp
Webrev sdiff for 8015774: the old version of the file is shown first, then the new version.

=== Old version ===


 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap() {
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
}


void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = free_sentinel;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == free_sentinel) i = 1;
  }
}
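
Taken together, the two markers establish the segment map invariant: a free segment holds free_sentinel, and each segment of a used block holds the distance back toward its block header (wrapping to 1 before it could collide with free_sentinel). A self-contained sketch of that encoding, using a hypothetical 8-segment map (the array, sizes, and printf are illustration only, not HotSpot code):

#include <cstdio>

// Toy model of the CodeHeap segment map: free_sentinel marks free
// segments; used segments store how far back their block header is.
static const unsigned char free_sentinel = 0xFF;

int main() {
  unsigned char segmap[8];
  for (int s = 0; s < 8; s++) segmap[s] = free_sentinel;  // all free
  // Mark a 3-segment block starting at segment 2 as used: values 0, 1, 2.
  int i = 0;
  for (int s = 2; s < 5; s++) {
    segmap[s] = (unsigned char)i++;
    if (i == free_sentinel) i = 1;  // never store the sentinel for used space
  }
  // Recover the block start from any interior segment by stepping back.
  int s = 4;
  while (segmap[s] > 0) s -= segmap[s];
  printf("block starts at segment %d\n", s);  // prints: block starts at segment 2
  return 0;
}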


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}
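
align_to_page_size() is the standard align-up idiom for power-of-two alignments: add alignment - 1, then mask off the low bits. A quick standalone check of the arithmetic, assuming a 4096-byte page (the helper name align_up here is mine, not the VM's):

#include <cassert>
#include <cstddef>

// Same bit trick as align_to_page_size(), with the alignment made explicit.
static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(align_up(5000, 4096) == 8192);  // crosses a page boundary: round up
  assert(align_up(8192, 4096) == 8192);  // already aligned: unchanged
  assert(align_up(1,    4096) == 4096);  // any nonzero request takes a full page
  return 0;
}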


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
                       size_t segment_size) {
  assert(reserved_size >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(committed_size, reserved_size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(reserved_size, r_align);
  const size_t c_size = align_size_up(committed_size, page_size);

  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
  os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

[... lines 133-164 of the old file are not shown ...]

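reserve() sizes both _memory and _segmap in terms of segments via size_to_segments(). Those helpers are declared in memory/heap.hpp rather than in this file; a sketch of their likely shape in this era, given the _segment_size and _log2_segment_size fields initialized above:

// Sketch (from heap.hpp, not this file): byte <-> segment conversions.
// Rounding up means a partially used trailing segment still counts.
size_t CodeHeap::size_to_segments(size_t size) const {
  return (size + _segment_size - 1) >> _log2_segment_size;
}
size_t CodeHeap::segments_to_size(size_t number_of_segments) const {
  return number_of_segments << _log2_segment_size;
}
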
void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}

void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (!is_critical) {
    // Make sure the allocation fits in the unallocated heap without using
    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
      // Fail allocation
      return NULL;
    }
  }

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b =  block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
    DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
    return b->allocated_space();
  } else {
    return NULL;
  }
}
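
A hedged sketch of how a caller drives allocate(): in HotSpot the caller is the code cache, which holds CodeCache_lock around these operations; the emit step below is a placeholder:

// Illustration only; not part of this file.
void example_caller(CodeHeap* heap) {
  void* p = heap->allocate(1024, /* is_critical */ false);
  if (p == NULL) {
    // Heap is full, or only CodeCacheMinimumFreeSpace remains and the
    // request was not critical. The caller must cope with the failure.
    return;
  }
  // ... emit code into the returned space ...
  heap->deallocate(p);  // later: return the block to the freelist
}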


void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
             segments_to_size(b->length()) - sizeof(HeapBlock)));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}
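
deallocate() asserts that p is a block start via find_start(), which performs exactly the backward segment-map walk sketched earlier. find_start() is outside the hunks shown in this webrev; its shape, reconstructed from the segment-map invariant and hedged accordingly:

// Sketch of find_start() (not shown in this webrev): map p to a segment,
// then step back by the stored distances until a block header (value 0).
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) return NULL;
  size_t seg_idx = segment_for(p);            // byte address -> segment index
  address seg_map = (address)_segmap.low();
  if (seg_map[seg_idx] == free_sentinel) return NULL;  // points into free space
  while (seg_map[seg_idx] > 0) {
    seg_idx -= (int)seg_map[seg_idx];         // walk back toward the header
  }
  HeapBlock* h = block_at(seg_idx);
  if (h->free()) return NULL;
  return h->allocated_space();
}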

/**
 * Uses segment map to find the start (header) of a nmethod. This works as follows:
 * [... remainder of the old file is not shown in this sdiff ...]


=== New version ===

 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  : _code_blob_type(code_blob_type) {
  _name                         = name;
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
  _max_allocated_capacity       = 0;
  _was_full                     = false;
}
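
The new constructor takes a display name and a code blob type, which the segmented code cache uses to create one CodeHeap per blob type. For illustration (heap names and CodeBlobType constants as used by the segmented-code-cache work; not part of this file):

// Illustration only: one heap per blob type in a segmented code cache.
CodeHeap* non_nmethod  = new CodeHeap("CodeHeap 'non-nmethods'",          CodeBlobType::NonNMethod);
CodeHeap* profiled     = new CodeHeap("CodeHeap 'profiled nmethods'",     CodeBlobType::MethodProfiled);
CodeHeap* non_profiled = new CodeHeap("CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);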


void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = free_sentinel;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == free_sentinel) i = 1;
  }
}


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(committed_size, rs.size(), 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_size_up(committed_size, page_size);

  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

[... lines 131-162 of the new file are not shown ...]

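The key change to reserve(): the heap no longer creates its own ReservedCodeSpace; the caller reserves one contiguous code space and hands each heap a slice of it. A hedged sketch of that call pattern, using ReservedSpace::first_part()/last_part() (sizes and variable names here are hypothetical):

// Illustration only: the caller owns the reservation and partitions it.
ReservedCodeSpace rs(total_size, r_align, large_pages);
ReservedSpace non_nmethod_space = rs.first_part(non_nmethod_size);
ReservedSpace rest              = rs.last_part(non_nmethod_size);
non_nmethod_heap->reserve(non_nmethod_space, committed_size, CodeCacheSegmentSize);
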
void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}

void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (!is_critical) {
    // Make sure the allocation fits in the unallocated heap without using
    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
      // Fail allocation
      return NULL;
    }
  }

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b =  block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
    DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    return b->allocated_space();
  } else {
    return NULL;
  }
}
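
Both allocation paths now record a high-water mark, _max_allocated_capacity, updated only on success so it tracks the largest allocated_capacity() ever observed. A matching accessor presumably lives in heap.hpp; a one-line sketch:

// Sketch (heap.hpp): expose the high-water mark for monitoring/statistics.
size_t max_allocated_capacity() const { return _max_allocated_capacity; }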


void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
             segments_to_size(b->length()) - sizeof(HeapBlock)));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}

/**
 * Uses segment map to find the start (header) of a nmethod. This works as follows:
 * [... remainder of the new file is not shown in this sdiff ...]

