/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"

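// Heap initialization: reserve the maximum heap size up front, commit only
// the initial size, and hand the committed range to a single contiguous
// allocation space. Also caps the TLAB size and derives the step sizes that
// throttle counter updates and periodic heap printing on the allocation path.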
jint EpsilonHeap::initialize() {
  size_t init_byte_size = _policy->initial_heap_byte_size();
  size_t max_byte_size = _policy->max_heap_byte_size();
  size_t align = _policy->heap_alignment();

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
  MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(reserved_region.start(), reserved_region.end());

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), EpsilonMaxTLABSize / HeapWordSize);

  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapStep == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapStep);

  if (init_byte_size != max_byte_size) {
    log_info(gc)("Initialized with " SIZE_FORMAT "M heap, resizeable up to " SIZE_FORMAT "M in " SIZE_FORMAT "M steps",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Initialized with " SIZE_FORMAT "M non-resizeable heap", init_byte_size / M);
  }
  if (UseTLAB) {
    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K, elasticity: %.2f",
                 _max_tlab_size * HeapWordSize / K,
                 EpsilonTLABElasticity);
  } else {
    log_info(gc)("Not using TLAB allocation");
  }

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

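// Serviceability plumbing: Epsilon exposes a single memory pool and a single
// memory manager to the management APIs (e.g. the java.lang.management beans).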
void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return the max allocatable TLAB size, and let the allocation path
  // figure out the actual TLAB allocation size.
  return _max_tlab_size;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

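// Common allocation path for TLAB and out-of-TLAB allocations. The fast path
// is a lock-free pointer bump via par_allocate(). On failure, take Heap_lock,
// commit more of the reserved space (in EpsilonMinHeapExpand chunks when
// possible), and retry. Returns NULL only when the reserved heap is exhausted.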
HeapWord* EpsilonHeap::allocate_work(size_t size) {
  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLockerEx ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, but this allocation still fits:
      // take all the space that is left.
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord*)_virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();
  if (used - _last_counter_update >= _step_counter_update) {
    _last_counter_update = used;
    _monitoring_support->update_counters();
  }

  if (used - _last_heap_print >= _step_heap_print) {
    log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 max_capacity() / M, capacity() / M, used / M);
    _last_heap_print = used;
  }

  return res;
}

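// TLAB allocation with elastic sizing: if the request fits the per-thread
// ergonomic size, use the requested size; otherwise grow the next TLAB by
// EpsilonTLABElasticity, clamped between min_size and _max_tlab_size. On
// allocation failure, the ergonomic size is reset so that the thread probes
// with smaller TLABs again.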
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  size_t ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(Thread::current());

  bool fits = (requested_size <= ergo_tlab);

  // If we can fit the allocation under current TLAB size, do so.
  // Otherwise, we want to elastically increase the TLAB size.
  size_t size = fits ? requested_size : (size_t)(ergo_tlab * EpsilonTLABElasticity);

  // Honor boundaries
  size = MAX2(min_size, MIN2(_max_tlab_size, size));

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  Thread::current()->name(),
                  requested_size * HeapWordSize / K,
                  min_size       * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab      * HeapWordSize / K,
                  size           * HeapWordSize / K);
  }

  HeapWord* res = allocate_work(size);
  if (res != NULL) {
    *actual_size = size;

    // Allocation succeeded; if we asked for expansion, this is our new TLAB size
    if (!fits) {
      EpsilonThreadLocalData::set_ergo_tlab_size(Thread::current(), size);
    }
  } else {
    // Allocation failed; reset ergonomics to try and fit smaller TLABs
    EpsilonThreadLocalData::set_ergo_tlab_size(Thread::current(), 0);
  }

  return res;
}

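// Out-of-TLAB allocation entry point (also used when TLABs are disabled).
// Epsilon never triggers the GC overhead limit, so the flag is
// unconditionally cleared.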
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

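// Epsilon is a no-op collector: explicit and full GC requests are logged and
// ignored, and only the monitoring counters are refreshed. Exhausting the
// heap therefore ends in allocation failure, not in a collection.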
void EpsilonHeap::collect(GCCause::Cause cause) {
  log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  log_info(gc)("Full GC request for \"%s\" is ignored", GCCause::to_string(gc_cause()));
  _monitoring_support->update_counters();
}

void EpsilonHeap::safe_object_iterate(ObjectClosure* cl) {
  _space->safe_object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream* st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);
}

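// Since Epsilon never reclaims memory, used() is effectively the total amount
// ever allocated (including TLAB waste), which makes the lifetime average
// allocation rate a simple division by the VM uptime.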
void EpsilonHeap::print_tracing_info() const {
  Log(gc) log;
  size_t allocated_kb = used() / K;
  log.info("Total allocated: " SIZE_FORMAT " KB", allocated_kb);
  log.info("Average allocation rate: " SIZE_FORMAT " KB/sec",
           (size_t)(allocated_kb * NANOSECS_PER_SEC / os::elapsed_counter()));
}