src/share/vm/memory/heap.cpp

src/share/vm/memory/heap.cpp


  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/heap.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/os.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 size_t CodeHeap::header_size() {
  32   return sizeof(HeapBlock);
  33 }
  34 
  35 
  36 // Implementation of Heap
  37 
  38 CodeHeap::CodeHeap() {
  39   _number_of_committed_segments = 0;
  40   _number_of_reserved_segments  = 0;
  41   _segment_size                 = 0;
  42   _log2_segment_size            = 0;
  43   _next_segment                 = 0;
  44   _freelist                     = NULL;
  45   _freelist_segments            = 0;
  46 }
  47 
  48 
  49 void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  50   assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  51   assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  52   // setup _segmap pointers for faster indexing
  53   address p = (address)_segmap.low() + beg;
  54   address q = (address)_segmap.low() + end;
  55   // initialize interval
  56   while (p < q) *p++ = 0xFF;
  57 }
  58 
  59 
  60 void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  61   assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  62   assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  63   // setup _segmap pointers for faster indexing
  64   address p = (address)_segmap.low() + beg;
  65   address q = (address)_segmap.low() + end;
  66   // initialize interval
  67   int i = 0;
  68   while (p < q) {
  69     *p++ = i++;
  70     if (i == 0xFF) i = 1;
  71   }
  72 }
  73 
  74 
  75 static size_t align_to_page_size(size_t size) {
  76   const size_t alignment = (size_t)os::vm_page_size();
  77   assert(is_power_of_2(alignment), "no kidding ???");
  78   return (size + alignment - 1) & ~(alignment - 1);
  79 }
  80 
  81 
  82 void CodeHeap::on_code_mapping(char* base, size_t size) {
  83 #ifdef LINUX
  84   extern void linux_wrap_code(char* base, size_t size);
  85   linux_wrap_code(base, size);
  86 #endif
  87 }
  88 
  89 
  90 bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
  91                        size_t segment_size) {
  92   assert(reserved_size >= committed_size, "reserved < committed");
  93   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  94   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
  95 
  96   _segment_size      = segment_size;
  97   _log2_segment_size = exact_log2(segment_size);
  98 
  99   // Reserve and initialize space for _memory.
 100   const size_t page_size = os::can_execute_large_page_memory() ?
 101           os::page_size_for_region(committed_size, reserved_size, 8) :
 102           os::vm_page_size();
 103   const size_t granularity = os::vm_allocation_granularity();
 104   const size_t r_align = MAX2(page_size, granularity);
 105   const size_t r_size = align_size_up(reserved_size, r_align);
 106   const size_t c_size = align_size_up(committed_size, page_size);
 107 
 108   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
 109     MAX2(page_size, granularity);
 110   ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
 111   os::trace_page_sizes("code heap", committed_size, reserved_size, page_size,
 112                        rs.base(), rs.size());
 113   if (!_memory.initialize(rs, c_size)) {
 114     return false;
 115   }
 116 
 117   on_code_mapping(_memory.low(), _memory.committed_size());
 118   _number_of_committed_segments = size_to_segments(_memory.committed_size());
 119   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
 120   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 121   const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
 122   const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
 123   const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
 124 
 125   // reserve space for _segmap
 126   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 127     return false;
 128   }
 129 
 130   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 131 


 176 void CodeHeap::clear() {
 177   _next_segment = 0;
 178   mark_segmap_as_free(0, _number_of_committed_segments);
 179 }
 180 
 181 
 182 void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
 183   size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
 184   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 185 
  186   // First check if we can satisfy the request from the free list
 187   debug_only(verify());
 188   HeapBlock* block = search_freelist(number_of_segments, is_critical);
 189   debug_only(if (VerifyCodeCacheOften) verify());
 190   if (block != NULL) {
 191     assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
  192     assert(!block->free(), "must not be marked free");
 193 #ifdef ASSERT
 194     memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
 195 #endif
 196     return block->allocated_space();
 197   }
 198 
 199   // Ensure minimum size for allocation to the heap.
 200   if (number_of_segments < CodeCacheMinBlockLength) {
 201     number_of_segments = CodeCacheMinBlockLength;
 202   }
 203 
 204   if (!is_critical) {
 205     // Make sure the allocation fits in the unallocated heap without using
  206     // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
 207     if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
 208       // Fail allocation
 209       return NULL;
 210     }
 211   }
 212 
 213   if (_next_segment + number_of_segments <= _number_of_committed_segments) {
 214     mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
 215     HeapBlock* b =  block_at(_next_segment);
 216     b->initialize(number_of_segments);
 217     _next_segment += number_of_segments;
 218 #ifdef ASSERT
 219     memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
 220 #endif
 221     return b->allocated_space();
 222   } else {
 223     return NULL;
 224   }
 225 }
 226 
 227 
 228 void CodeHeap::deallocate(void* p) {
 229   assert(p == find_start(p), "illegal deallocation");
 230   // Find start of HeapBlock
 231   HeapBlock* b = (((HeapBlock *)p) - 1);
 232   assert(b->allocated_space() == p, "sanity check");
 233 #ifdef ASSERT
 234   memset((void *)b->allocated_space(),
 235          badCodeHeapFreeVal,
 236          segments_to_size(b->length()) - sizeof(HeapBlock));
 237 #endif
 238   add_to_freelist(b);
 239 
 240   debug_only(if (VerifyCodeCacheOften) verify());

src/share/vm/memory/heap.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "memory/heap.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/os.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 size_t CodeHeap::header_size() {
  32   return sizeof(HeapBlock);
  33 }
  34 
  35 
  36 // Implementation of Heap
  37 
  38 CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  39   : _code_blob_type(code_blob_type) {
  40   _name                         = name;
  41   _number_of_committed_segments = 0;
  42   _number_of_reserved_segments  = 0;
  43   _segment_size                 = 0;
  44   _log2_segment_size            = 0;
  45   _next_segment                 = 0;
  46   _freelist                     = NULL;
  47   _freelist_segments            = 0;
  48   _max_allocated_capacity       = 0;
  49   _was_full                     = false;
  50 }
  51 
  52 
  53 void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  54   assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  55   assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  56   // setup _segmap pointers for faster indexing
  57   address p = (address)_segmap.low() + beg;
  58   address q = (address)_segmap.low() + end;
  59   // initialize interval
  60   while (p < q) *p++ = 0xFF;
  61 }
  62 
  63 
  64 void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  65   assert(0   <= beg && beg <  _number_of_committed_segments, "interval begin out of bounds");
  66   assert(beg <  end && end <= _number_of_committed_segments, "interval end   out of bounds");
  67   // setup _segmap pointers for faster indexing
  68   address p = (address)_segmap.low() + beg;
  69   address q = (address)_segmap.low() + end;
  70   // initialize interval
  71   int i = 0;
  72   while (p < q) {
  73     *p++ = i++;
  74     if (i == 0xFF) i = 1;
  75   }
  76 }
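
The segment map lets the heap walk from an arbitrary address back to its block header: a free segment holds 0xFF, while a used segment holds its distance back toward the header (0 at the header itself, with values wrapping below 0xFF so very large blocks are walked in a few hops). A minimal standalone sketch of that backward walk, using a plain array in place of _segmap; find_block_start below is illustrative, not the find_start defined elsewhere in this file:

#include <cstddef>
#include <cstdint>

// Sketch: locate the segment holding the block header for the block
// that covers segment 'i'. Mirrors the encoding written by
// mark_segmap_as_used()/mark_segmap_as_free() above.
static size_t find_block_start(const uint8_t* segmap, size_t i) {
  if (segmap[i] == 0xFF) return (size_t)-1;  // free segment: no block here
  while (segmap[i] > 0) {
    i -= segmap[i];                          // hop backwards toward the header
  }
  return i;                                  // segmap[i] == 0 marks the header segment
}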
  77 
  78 
  79 static size_t align_to_page_size(size_t size) {
  80   const size_t alignment = (size_t)os::vm_page_size();
  81   assert(is_power_of_2(alignment), "no kidding ???");
  82   return (size + alignment - 1) & ~(alignment - 1);
  83 }
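
Because the page size is a power of two, adding alignment - 1 and masking off the low bits rounds up without a division. A small self-contained check of the idiom (round_up_pow2 is a hypothetical stand-in for align_to_page_size above, with a 4096-byte page assumed):

#include <cassert>
#include <cstddef>

static size_t round_up_pow2(size_t size, size_t alignment) {
  // Same idiom as align_to_page_size() above; alignment must be a power of 2.
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(round_up_pow2(5000, 4096) == 8192);  // partial page rounds up
  assert(round_up_pow2(8192, 4096) == 8192);  // exact multiple is unchanged
  assert(round_up_pow2(1,    4096) == 4096);  // anything nonzero takes a full page
  return 0;
}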
  84 
  85 
  86 void CodeHeap::on_code_mapping(char* base, size_t size) {
  87 #ifdef LINUX
  88   extern void linux_wrap_code(char* base, size_t size);
  89   linux_wrap_code(base, size);
  90 #endif
  91 }
  92 
  93 
  94 bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  95   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  96   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
  97 
  98   _segment_size      = segment_size;
  99   _log2_segment_size = exact_log2(segment_size);
 100 
 101   // Reserve and initialize space for _memory.
 102   const size_t page_size = os::can_execute_large_page_memory() ?
 103           os::page_size_for_region(committed_size, rs.size(), 8) :
 104           os::vm_page_size();
 105   const size_t granularity = os::vm_allocation_granularity();
 106   const size_t c_size = align_size_up(committed_size, page_size);
 107 
 108   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
 109                        rs.base(), rs.size());
 110   if (!_memory.initialize(rs, c_size)) {
 111     return false;
 112   }
 113 
 114   on_code_mapping(_memory.low(), _memory.committed_size());
 115   _number_of_committed_segments = size_to_segments(_memory.committed_size());
 116   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
 117   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 118   const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
 119   const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
 120   const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
 121 
 122   // reserve space for _segmap
 123   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 124     return false;
 125   }
 126 
 127   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 128 


 173 void CodeHeap::clear() {
 174   _next_segment = 0;
 175   mark_segmap_as_free(0, _number_of_committed_segments);
 176 }
 177 
 178 
 179 void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
 180   size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
 181   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 182 
  183   // First check if we can satisfy the request from the free list
 184   debug_only(verify());
 185   HeapBlock* block = search_freelist(number_of_segments, is_critical);
 186   debug_only(if (VerifyCodeCacheOften) verify());
 187   if (block != NULL) {
 188     assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
  189     assert(!block->free(), "must not be marked free");
 190 #ifdef ASSERT
 191     memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
 192 #endif
 193     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 194     return block->allocated_space();
 195   }
 196 
 197   // Ensure minimum size for allocation to the heap.
 198   if (number_of_segments < CodeCacheMinBlockLength) {
 199     number_of_segments = CodeCacheMinBlockLength;
 200   }
 201 
 202   if (!is_critical) {
 203     // Make sure the allocation fits in the unallocated heap without using
  204     // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
 205     if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
 206       // Fail allocation
 207       return NULL;
 208     }
 209   }
 210 
 211   if (_next_segment + number_of_segments <= _number_of_committed_segments) {
 212     mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
 213     HeapBlock* b =  block_at(_next_segment);
 214     b->initialize(number_of_segments);
 215     _next_segment += number_of_segments;
 216 #ifdef ASSERT
 217     memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
 218 #endif
 219     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
 220     return b->allocated_space();
 221   } else {
 222     return NULL;
 223   }
 224 }
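
allocate() works in whole segments: the request is padded with the HeapBlock header, rounded up to a segment count, served from the free list when possible, and otherwise carved off the bump pointer at _next_segment. A worked sketch of the rounding, assuming the shift-based arithmetic that size_to_segments() in heap.hpp performs, with made-up sizes for illustration:

#include <cassert>
#include <cstddef>

// Assumed helper, modeled on CodeHeap::size_to_segments() in heap.hpp.
static size_t size_to_segments(size_t size, size_t segment_size, int log2_segment_size) {
  return (size + segment_size - 1) >> log2_segment_size;
}

int main() {
  const size_t segment_size = 64;  // e.g. CodeCacheSegmentSize
  const int    log2_size    = 6;
  const size_t header       = 8;   // stand-in for sizeof(HeapBlock)
  // A 100-byte request plus header spans 108 bytes -> 2 segments (128 bytes).
  assert(size_to_segments(100 + header, segment_size, log2_size) == 2);
  // allocate() then bumps smaller requests up to CodeCacheMinBlockLength segments.
  return 0;
}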
 225 
 226 
 227 void CodeHeap::deallocate(void* p) {
 228   assert(p == find_start(p), "illegal deallocation");
 229   // Find start of HeapBlock
 230   HeapBlock* b = (((HeapBlock *)p) - 1);
 231   assert(b->allocated_space() == p, "sanity check");
 232 #ifdef ASSERT
 233   memset((void *)b->allocated_space(),
 234          badCodeHeapFreeVal,
 235          segments_to_size(b->length()) - sizeof(HeapBlock));
 236 #endif
 237   add_to_freelist(b);
 238 
 239   debug_only(if (VerifyCodeCacheOften) verify());


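search_freelist() and add_to_freelist(), used in allocate() and deallocate() above, fall outside the hunks shown here. For orientation only, a free list of this kind chains deallocated blocks together and serves later requests first-fit; a minimal sketch under those assumptions, not the HotSpot implementation:

#include <cstddef>

// Illustrative only: a segment-counted free-list node.
struct FreeNode {
  size_t    length;  // block size in segments
  FreeNode* next;    // next free block
};

// First-fit search: unlink and return the first block large enough
// (no splitting or coalescing in this sketch).
static FreeNode* search(FreeNode** head, size_t needed_segments) {
  for (FreeNode** cur = head; *cur != NULL; cur = &(*cur)->next) {
    if ((*cur)->length >= needed_segments) {
      FreeNode* hit = *cur;
      *cur = hit->next;
      return hit;
    }
  }
  return NULL;  // caller falls back to the bump pointer in allocate()
}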