/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"

jint EpsilonHeap::initialize() {
  size_t align = _policy->heap_alignment();
  size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
  size_t max_byte_size  = align_up(_policy->max_heap_byte_size(), align);

  // Initialize backing storage
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(reserved_region.start(), reserved_region.end());

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;
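
  // These fields are precomputed once so the hot allocation path can compare
  // against plain fields instead of re-deriving thresholds from flags:
  // _step_counter_update and _step_heap_print are the byte distances between
  // monitoring counter refreshes and occupancy prints (SIZE_MAX effectively
  // disables periodic printing), and _decay_time_ns is the TLAB decay interval
  // converted from milliseconds to nanoseconds up front.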

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  if (init_byte_size != max_byte_size) {
    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  }

  if (UseTLAB) {
    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
    if (EpsilonElasticTLAB) {
      log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
    }
    if (EpsilonElasticTLABDecay) {
      log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
    }
  } else {
    log_info(gc)("Not using TLAB allocation");
  }

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual TLAB allocation size.
  return _max_tlab_size;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}
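
// Slow-path allocation: carve "size" words out of the space, expanding the
// committed part of the heap under Heap_lock when the lock-free fast path
// fails. Monitoring counters and occupancy lines are published from here with
// CAS updates on the _last_* watermarks, so concurrent allocators never block
// each other just to refresh statistics.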
HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLockerEx ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, and this allocation is still possible,
      // take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();

  // Allocation successful, update counters
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}
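
// TLAB sizing policy: with EpsilonElasticTLAB, each thread keeps an
// "ergonomic" TLAB size in its thread-local data. A request that fits the
// ergonomic size is granted as-is; a larger request inflates the next TLAB by
// EpsilonTLABElasticity, so actively allocating threads grow their TLABs
// geometrically (e.g. at 1.1x elasticity: 100K -> 110K -> 121K words while
// requests keep exceeding the ergonomic size). With EpsilonElasticTLABDecay,
// a thread that has not allocated for EpsilonTLABDecayTime ms is retracted to
// a minimal TLAB, so idle threads do not keep large chunks of heap wired.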
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = MAX2(min_size, MIN2(_max_tlab_size, size));

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}
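
// Epsilon never collects the Java heap. The only causes that require real work
// are the metaspace ones: the VM is already at a safepoint expecting the GC to
// recompute metaspace sizes, and skipping that would re-enter the safepoint
// almost immediately. Everything else, including explicit System.gc(), is
// acknowledged in the log and ignored.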
void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
  _space->safe_object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);

  MetaspaceUtils::print_on(st);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  size_t reserved  = MetaspaceUtils::reserved_bytes();
  size_t committed = MetaspaceUtils::committed_bytes();
  size_t used      = MetaspaceUtils::used_bytes();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}