/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
  MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
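
  // Illustrative arithmetic for the steps above (a sketch with assumed flag
  // values, not necessarily the defaults): with -Xmx1g,
  // EpsilonUpdateCountersStep = 1M and EpsilonPrintHeapSteps = 20:
  //   _step_counter_update = MIN2(1024M / 16, 1M) = 1M
  //   _step_heap_print     = 1024M / 20          ~= 51M
  // so the monitoring counters refresh about every 1M of allocation, and an
  // occupancy line prints roughly every 51M; EpsilonPrintHeapSteps == 0
  // disables printing by pushing the step to SIZE_MAX.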

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  if (init_byte_size != max_byte_size) {
    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  }

  if (UseTLAB) {
    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
    if (EpsilonElasticTLAB) {
      log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
    }
    if (EpsilonElasticTLABDecay) {
      log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
    }
  } else {
    log_info(gc)("Not using TLAB allocation");
  }

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual allocation size. Note: result should be in bytes.
  return _max_tlab_size * HeapWordSize;
}
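
// Note on units for unsafe_max_tlab_alloc above: _max_tlab_size is kept in
// heap words, while the TLAB machinery expects a byte count here. A minimal
// sketch with assumed values: on a 64-bit VM (HeapWordSize == 8), a
// _max_tlab_size of 512K words reports 512K * 8 = 4M bytes.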

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLocker ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, but this allocation still fits,
      // so take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();

  // Allocation successful, update counters
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread has not allocated recently, retract the ergonomic size.
      // This conserves memory when a thread has an initial burst of allocations
      // and then allocates only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under the current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }
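
  // Worked example of the elastic path above (assumed values, not defaults):
  // with EpsilonTLABElasticity = 2.0 and a learned ergo_tlab of 64K words, a
  // request for 100K words does not fit (100K > 64K), so we ask for
  // 64K * 2.0 = 128K words instead. If that allocation succeeds, 128K becomes
  // the new ergonomic size below, so a steadily-allocating thread sees its
  // TLABs grow geometrically and goes back to the shared heap less often.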

  // Always honor boundaries
  size = clamp(size, min_size, _max_tlab_size);

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}
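
// Epsilon only reacts to the metadata-threshold causes handled below;
// everything else is acknowledged but deliberately ignored. For instance,
// an explicit System.gc() arrives as GCCause::_java_lang_system_gc, takes
// the default branch, and produces a log line like (illustrative output):
//   [info][gc] GC request for "System.gc()" is ignored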
void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.
      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  size_t reserved  = MetaspaceUtils::reserved_bytes();
  size_t committed = MetaspaceUtils::committed_bytes();
  size_t used      = MetaspaceUtils::used_bytes();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}
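
// Example of the occupancy lines emitted by the printers above, built from
// the format strings in this file (illustrative values only):
//   [info][gc] Heap: 768M reserved, 256M (33.33%) committed, 131M (17.06%) used
//   [info][gc,metaspace] Metaspace: 56M reserved, 10M (17.86%) committed, 9M (16.07%) used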