/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"

/*
 * In normal concurrent cycle, we have to pace the application to let GC finish.
 *
 * Here, we do not know how large would be the collection set, and what are the
 * relative performances of the each stage in the concurrent cycle, and so we have to
 * make some assumptions.
 *
 * For concurrent mark, there is no clear notion of progress. The moderately accurate
 * and easy to get metric is the amount of live objects the mark had encountered. But,
 * that does not directly correlate with the used heap, because the heap might be fully
 * dead or fully alive.
We cannot assume either of the extremes: we would either allow 44 * application to run out of memory if we assume heap is fully dead but it is not, and, 45 * conversely, we would pacify application excessively if we assume heap is fully alive 46 * but it is not. So we need to guesstimate the particular expected value for heap liveness. 47 * The best way to do this is apparently recording the past history. 48 * 49 * For concurrent evac and update-refs, we are walking the heap per-region, and so the 50 * notion of progress is clear: we get reported the "used" size from the processed regions 51 * and use the global heap-used as the baseline. 52 * 53 * The allocatable space when GC is running is "free" at the start of phase, but the 54 * accounted budget is based on "used". So, we need to adjust the tax knowing that. 55 */ 56 57 void ShenandoahPacer::setup_for_mark() { 58 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 59 60 size_t live = update_and_get_progress_history(); 61 size_t free = _heap->free_set()->available(); 62 63 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 64 size_t taxable = free - non_taxable; 65 66 double tax = 1.0 * live / taxable; // base tax for available free space 67 tax *= 1; // mark can succeed with immediate garbage, claim all available space 68 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 69 70 restart_with(non_taxable, tax); 71 72 log_info(gc, ergo)("Pacer for Mark. 
Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 73 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 74 byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), 75 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 76 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 77 tax); 78 } 79 80 void ShenandoahPacer::setup_for_evac() { 81 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 82 83 size_t used = _heap->collection_set()->used(); 84 size_t free = _heap->free_set()->available(); 85 86 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 87 size_t taxable = free - non_taxable; 88 89 double tax = 1.0 * used / taxable; // base tax for available free space 90 tax *= 2; // evac is followed by update-refs, claim 1/2 of remaining free 91 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 92 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 93 94 restart_with(non_taxable, tax); 95 96 log_info(gc, ergo)("Pacer for Evacuation. 
Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 97 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 98 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 99 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 100 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 101 tax); 102 } 103 104 void ShenandoahPacer::setup_for_updaterefs() { 105 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 106 107 size_t used = _heap->used(); 108 size_t free = _heap->free_set()->available(); 109 110 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 111 size_t taxable = free - non_taxable; 112 113 double tax = 1.0 * used / taxable; // base tax for available free space 114 tax *= 1; // update-refs is the last phase, claim the remaining free 115 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 116 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 117 118 restart_with(non_taxable, tax); 119 120 log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 121 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 122 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 123 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 124 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 125 tax); 126 } 127 128 /* 129 * In idle phase, we have to pace the application to let control thread react with GC start. 130 * 131 * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges 132 * it had seen recent allocations. It will naturally pace the allocations if control thread is 133 * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget 134 * for applications to allocate at. 
135 */ 136 137 void ShenandoahPacer::setup_for_idle() { 138 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 139 140 size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack; 141 double tax = 1; 142 143 restart_with(initial, tax); 144 145 log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 146 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial), 147 tax); 148 } 149 150 /* 151 * There is no useful notion of progress for these operations. To avoid stalling 152 * the allocators unnecessarily, allow them to run unimpeded. 153 */ 154 155 void ShenandoahPacer::setup_for_preclean() { 156 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 157 158 size_t initial = _heap->max_capacity(); 159 restart_with(initial, 1.0); 160 161 log_info(gc, ergo)("Pacer for Precleaning. Non-Taxable: " SIZE_FORMAT "%s", 162 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial)); 163 } 164 165 void ShenandoahPacer::setup_for_reset() { 166 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 167 168 size_t initial = _heap->max_capacity(); 169 restart_with(initial, 1.0); 170 171 log_info(gc, ergo)("Pacer for Reset. 
Non-Taxable: " SIZE_FORMAT "%s", 172 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial)); 173 } 174 175 size_t ShenandoahPacer::update_and_get_progress_history() { 176 if (_progress == -1) { 177 // First initialization, report some prior 178 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO); 179 return (size_t) (_heap->max_capacity() * 0.1); 180 } else { 181 // Record history, and reply historical data 182 _progress_history->add(_progress); 183 Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO); 184 return (size_t) (_progress_history->avg() * HeapWordSize); 185 } 186 } 187 188 void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) { 189 size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize; 190 STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); 191 Atomic::xchg(&_budget, (intptr_t)initial); 192 Atomic::store(&_tax_rate, tax_rate); 193 Atomic::inc(&_epoch); 194 } 195 196 bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) { 197 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 198 199 intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate)); 200 201 intptr_t cur = 0; 202 intptr_t new_val = 0; 203 do { 204 cur = Atomic::load(&_budget); 205 if (cur < tax && !force) { 206 // Progress depleted, alas. 207 return false; 208 } 209 new_val = cur - tax; 210 } while (Atomic::cmpxchg(&_budget, cur, new_val) != cur); 211 return true; 212 } 213 214 void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) { 215 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 216 217 if (_epoch != epoch) { 218 // Stale ticket, no need to unpace. 
219 return; 220 } 221 222 intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate)); 223 Atomic::add(&_budget, tax); 224 } 225 226 intptr_t ShenandoahPacer::epoch() { 227 return Atomic::load(&_epoch); 228 } 229 230 void ShenandoahPacer::pace_for_alloc(size_t words) { 231 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 232 233 // Fast path: try to allocate right away 234 if (claim_for_alloc(words, false)) { 235 return; 236 } 237 238 // Threads that are attaching should not block at all: they are not 239 // fully initialized yet. Blocking them would be awkward. 240 // This is probably the path that allocates the thread oop itself. 241 // Forcefully claim without waiting. 242 if (JavaThread::current()->is_attaching_via_jni()) { 243 claim_for_alloc(words, true); 244 return; 245 } 246 247 size_t max = ShenandoahPacingMaxDelay; 248 double start = os::elapsedTime(); 249 250 size_t total = 0; 251 size_t cur = 0; 252 253 while (true) { 254 // We could instead assist GC, but this would suffice for now. 255 // This code should also participate in safepointing. 256 // Perform the exponential backoff, limited by max. 257 258 cur = cur * 2; 259 if (total + cur > max) { 260 cur = (max > total) ? (max - total) : 0; 261 } 262 cur = MAX2<size_t>(1, cur); 263 264 wait(cur); 265 266 double end = os::elapsedTime(); 267 total = (size_t)((end - start) * 1000); 268 269 if (total > max) { 270 // Spent local time budget to wait for enough GC progress. 271 // Breaking out and allocating anyway, which may mean we outpace GC, 272 // and start Degenerated GC cycle. 273 _delays.add(total); 274 275 // Forcefully claim the budget: it may go negative at this point, and 276 // GC should replenish for this and subsequent allocations 277 claim_for_alloc(words, true); 278 break; 279 } 280 281 if (claim_for_alloc(words, false)) { 282 // Acquired enough permit, nice. Can allocate now. 
283 _delays.add(total); 284 break; 285 } 286 } 287 } 288 289 void ShenandoahPacer::wait(long time_ms) { 290 // Perform timed wait. It works like like sleep(), except without modifying 291 // the thread interruptible status. MonitorLocker also checks for safepoints. 292 assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify"); 293 MonitorLocker locker(_wait_monitor); 294 _wait_monitor->wait(time_ms); 295 } 296 297 void ShenandoahPacer::notify_waiters() { 298 MonitorLocker locker(_wait_monitor); 299 _wait_monitor->notify_all(); 300 } 301 302 void ShenandoahPacer::print_on(outputStream* out) const { 303 out->print_cr("ALLOCATION PACING:"); 304 out->cr(); 305 306 out->print_cr("Max pacing delay is set for " UINTX_FORMAT " ms.", ShenandoahPacingMaxDelay); 307 out->cr(); 308 309 out->print_cr("Higher delay would prevent application outpacing the GC, but it will hide the GC latencies"); 310 out->print_cr("from the STW pause times. Pacing affects the individual threads, and so it would also be"); 311 out->print_cr("invisible to the usual profiling tools, but would add up to end-to-end application latency."); 312 out->print_cr("Raise max pacing delay with care."); 313 out->cr(); 314 315 out->print_cr("Actual pacing delays histogram:"); 316 out->cr(); 317 318 out->print_cr("%10s - %10s %12s%12s", "From", "To", "Count", "Sum"); 319 320 size_t total_count = 0; 321 size_t total_sum = 0; 322 for (int c = _delays.min_level(); c <= _delays.max_level(); c++) { 323 int l = (c == 0) ? 
0 : 1 << (c - 1); 324 int r = 1 << c; 325 size_t count = _delays.level(c); 326 size_t sum = count * (r - l) / 2; 327 total_count += count; 328 total_sum += sum; 329 330 out->print_cr("%7d ms - %7d ms: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", l, r, count, sum); 331 } 332 out->print_cr("%23s: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", "Total", total_count, total_sum); 333 out->cr(); 334 out->print_cr("Pacing delays are measured from entering the pacing code till exiting it. Therefore,"); 335 out->print_cr("observed pacing delays may be higher than the threshold when paced thread spent more"); 336 out->print_cr("time in the pacing code. It usually happens when thread is de-scheduled while paced,"); 337 out->print_cr("OS takes longer to unblock the thread, or JVM experiences an STW pause."); 338 out->cr(); 339 }