/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);
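  // The entire max heap is reserved up front, but only the initial size is
  // committed here; the remainder is committed on demand in allocate_work().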

  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
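
  // Worked example (assuming the default EpsilonPrintHeapSteps of 20): with a
  // 1G max heap, an occupancy line prints about every 1G / 20 = ~51M of
  // allocation. EpsilonTLABDecayTime is in milliseconds, so a value of 1000
  // converts to 1,000,000,000 ns here.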

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return the max allocatable TLAB size, and let the allocation path figure
  // out the actual allocation size. Note: the result is in bytes.
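  // For example, on a 64-bit VM (HeapWordSize == 8), a _max_tlab_size of
  // 512K words is reported as a 4M byte limit.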
  return _max_tlab_size * HeapWordSize;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLocker ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

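    // Expanding by at least EpsilonMinHeapExpand amortizes the cost of taking
    // Heap_lock and growing the virtual space over many small allocations,
    // instead of committing memory one failed allocation at a time.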
    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, and this allocation is still possible,
      // take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();

  // Allocation successful, update counters
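  // Multiple threads can race past the size check; the CAS ensures that only
  // the thread that moves _last_counter_update forward publishes the counters,
  // capping updates to roughly one per _step_counter_update of allocation.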
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

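// Illustrative command line exercising the elastic TLAB sizing below
// (Epsilon is experimental, hence the unlock flag):
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC \
//        -XX:+EpsilonElasticTLAB -XX:+EpsilonElasticTLABDecay ...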
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread has not allocated recently, retract the ergonomic size.
      // This conserves memory when a thread has an initial burst of allocations
      // and then allocates only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }
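    // E.g., assuming the default EpsilonTLABDecayTime of 1000 ms: a thread
    // that stops allocating for over a second restarts from a small TLAB.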

    // If we can fit the allocation under the current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
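    // E.g., assuming the default EpsilonTLABElasticity of 1.1: a 100K
    // ergonomic size is bumped to 110K on a miss, so repeated misses grow
    // the TLAB geometrically.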
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: "  SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                          "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try to fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

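  // Illustrative output (numbers are examples only):
  //   Heap: 256M reserved, 128M (50.00%) committed, 64M (25.00%) used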
  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  size_t reserved  = MetaspaceUtils::reserved_bytes();
  size_t committed = MetaspaceUtils::committed_bytes();
  size_t used      = MetaspaceUtils::used_bytes();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}