< prev index next >

src/share/vm/memory/heap.cpp — webrev diff listing: the original version of CodeHeap::reserve() (using align_size_up) is shown first, followed by the updated version (using align_up).

Print this page




  95 }
  96 
  97 
// Reserve the code heap's backing store (_memory) and its segment map
// (_segmap).  Returns false if either virtual-memory area cannot be
// initialized; returns true on success, after clear() has reset the
// remaining bookkeeping state.
//
//   rs             - pre-reserved address range that will back the heap
//   committed_size - bytes to commit up front (must be <= rs.size())
//   segment_size   - heap allocation granule; must be a power of 2 and at
//                    least sizeof(FreeBlock) so a free-block header fits
  98 bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  99   assert(rs.size() >= committed_size, "reserved < committed");
 100   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
 101   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
 102 
     // Cache log2(segment_size) as well, so segment <-> address arithmetic
     // can shift instead of divide (valid because of the power-of-2 assert).
 103   _segment_size      = segment_size;
 104   _log2_segment_size = exact_log2(segment_size);
 105 
 106   // Reserve and initialize space for _memory.
     // Prefer a larger page size only when both the committed portion and the
     // whole reserved range are big enough to hold at least min_pages pages.
 107   size_t page_size = os::vm_page_size();
 108   if (os::can_execute_large_page_memory()) {
 109     const size_t min_pages = 8;
 110     page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
 111                      os::page_size_for_region_aligned(rs.size(), min_pages));
 112   }
 113 
     // Round the initial commit up to the chosen page size.
 114   const size_t granularity = os::vm_allocation_granularity();
 115   const size_t c_size = align_size_up(committed_size, page_size);
 116 
 117   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
 118                        rs.base(), rs.size());
 119   if (!_memory.initialize(rs, c_size)) {
 120     return false;
 121   }
 122 
     // NOTE(review): on_code_mapping() is defined elsewhere; presumably it
     // announces the newly committed executable mapping (e.g. to profiling
     // tools) -- confirm against its definition.
 123   on_code_mapping(_memory.low(), _memory.committed_size());
 124   _number_of_committed_segments = size_to_segments(_memory.committed_size());
 125   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
 126   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
     // The segment counts double as byte sizes for the segment map (the
     // asserts below compare them directly against _segmap sizes, i.e. one
     // map byte per heap segment); round them up to mappable granularity.
 127   const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
 128   const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
 129   const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
 130 
 131   // reserve space for _segmap
 132   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 133     return false;
 134   }
 135 
     // Tag the segment map's memory as code-related for Native Memory Tracking.
 136   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 137 
 138   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
 139   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
 140   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
 141 
 142   // initialize remaining instance variables
 143   clear();
 144   return true;
 145 }
 146 
 147 
 148 bool CodeHeap::expand_by(size_t size) {




  95 }
  96 
  97 
// Reserve the code heap's backing store (_memory) and its segment map
// (_segmap).  Returns false if either virtual-memory area cannot be
// initialized; returns true on success, after clear() has reset the
// remaining bookkeeping state.  This version uses the align_up() helper
// (the renamed align_size_up) for all alignment math.
//
//   rs             - pre-reserved address range that will back the heap
//   committed_size - bytes to commit up front (must be <= rs.size())
//   segment_size   - heap allocation granule; must be a power of 2 and at
//                    least sizeof(FreeBlock) so a free-block header fits
  98 bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  99   assert(rs.size() >= committed_size, "reserved < committed");
 100   assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
 101   assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
 102 
     // Cache log2(segment_size) as well, so segment <-> address arithmetic
     // can shift instead of divide (valid because of the power-of-2 assert).
 103   _segment_size      = segment_size;
 104   _log2_segment_size = exact_log2(segment_size);
 105 
 106   // Reserve and initialize space for _memory.
     // Prefer a larger page size only when both the committed portion and the
     // whole reserved range are big enough to hold at least min_pages pages.
 107   size_t page_size = os::vm_page_size();
 108   if (os::can_execute_large_page_memory()) {
 109     const size_t min_pages = 8;
 110     page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
 111                      os::page_size_for_region_aligned(rs.size(), min_pages));
 112   }
 113 
     // Round the initial commit up to the chosen page size.
 114   const size_t granularity = os::vm_allocation_granularity();
 115   const size_t c_size = align_up(committed_size, page_size);
 116 
 117   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
 118                        rs.base(), rs.size());
 119   if (!_memory.initialize(rs, c_size)) {
 120     return false;
 121   }
 122 
     // NOTE(review): on_code_mapping() is defined elsewhere; presumably it
     // announces the newly committed executable mapping (e.g. to profiling
     // tools) -- confirm against its definition.
 123   on_code_mapping(_memory.low(), _memory.committed_size());
 124   _number_of_committed_segments = size_to_segments(_memory.committed_size());
 125   _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
 126   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
     // The segment counts double as byte sizes for the segment map (the
     // asserts below compare them directly against _segmap sizes, i.e. one
     // map byte per heap segment); round them up to mappable granularity.
 127   const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
 128   const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
 129   const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
 130 
 131   // reserve space for _segmap
 132   if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
 133     return false;
 134   }
 135 
     // Tag the segment map's memory as code-related for Native Memory Tracking.
 136   MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);
 137 
 138   assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
 139   assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
 140   assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");
 141 
 142   // initialize remaining instance variables
 143   clear();
 144   return true;
 145 }
 146 
 147 
 148 bool CodeHeap::expand_by(size_t size) {


< prev index next >