/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"

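// Heap initialization: reserve the entire max-sized heap up front, commit the
// initial size, and set up the single contiguous allocation space. This also
// precomputes the sizing/step values used on the hot allocation path, wires up
// monitoring support, and installs the Epsilon barrier set.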
jint EpsilonHeap::initialize() {
  size_t init_byte_size = _policy->initial_heap_byte_size();
  size_t max_byte_size  = _policy->max_heap_byte_size();

  // Initialize backing storage
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, _policy->heap_alignment());
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(reserved_region.start(), reserved_region.end());

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), EpsilonMaxTLABSize / HeapWordSize);
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapStep == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapStep);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  if (init_byte_size != max_byte_size) {
    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  }

  if (UseTLAB) {
    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
    if (EpsilonElasticTLAB) {
      log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
    }
    if (EpsilonElasticTLABDecay) {
      log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
    }
  } else {
    log_info(gc)("Not using TLAB allocation");
  }

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual TLAB allocation size.
  return _max_tlab_size;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

HeapWord* EpsilonHeap::allocate_work(size_t size) {
  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLockerEx ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, but this allocation is still possible,
      // so take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  // Allocation successful, update counters
  size_t used = _space->used();
  if (used - _last_counter_update >= _step_counter_update) {
    _last_counter_update = used;
    _monitoring_support->update_counters();
  }

  // ...and print the occupancy line, if needed
  if (used - _last_heap_print >= _step_heap_print) {
    log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 max_capacity() / M, capacity() / M, used / M);
    _last_heap_print = used;
  }

  return res;
}

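// TLAB allocation: with EpsilonElasticTLAB, each thread carries an ergonomic
// TLAB size. When a request does not fit the current ergonomic size, the next
// TLAB is sized up by EpsilonTLABElasticity; after EpsilonTLABDecayTime of
// allocation inactivity the ergonomic size is retracted, so mostly-idle
// threads do not keep requesting large TLABs.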
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = MAX2(min_size, MIN2(_max_tlab_size, size));

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                          "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

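// Epsilon never collects: explicit and implicit GC requests are acknowledged
// in the log and in the monitoring counters, but no collection happens.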
void EpsilonHeap::collect(GCCause::Cause cause) {
  log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  log_info(gc)("Full GC request for \"%s\" is ignored", GCCause::to_string(gc_cause()));
  _monitoring_support->update_counters();
}

void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
  _space->safe_object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);
}

void EpsilonHeap::print_tracing_info() const {
  Log(gc) log;
  size_t allocated_kb = used() / K;
  log.info("Total allocated: " SIZE_FORMAT " KB",
           allocated_kb);
  log.info("Average allocation rate: " SIZE_FORMAT " KB/sec",
           (size_t)(allocated_kb * NANOSECS_PER_SEC / os::elapsed_counter()));
}