/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "runtime/mutexLocker.hpp"

/*
 * In normal concurrent cycle, we have to pace the application to let GC finish.
 *
 * Here, we do not know how large would be the collection set, and what are the
 * relative performances of the each stage in the concurrent cycle, and so we have to
 * make some assumptions.
 *
 * For concurrent mark, there is no clear notion of progress. The moderately accurate
 * and easy to get metric is the amount of live objects the mark had encountered. But,
 * that does not directly correlate with the used heap, because the heap might be fully
 * dead or fully alive.
We cannot assume either of the extremes: we would either allow 42 * application to run out of memory if we assume heap is fully dead but it is not, and, 43 * conversely, we would pacify application excessively if we assume heap is fully alive 44 * but it is not. So we need to guesstimate the particular expected value for heap liveness. 45 * The best way to do this is apparently recording the past history. 46 * 47 * For concurrent evac and update-refs, we are walking the heap per-region, and so the 48 * notion of progress is clear: we get reported the "used" size from the processed regions 49 * and use the global heap-used as the baseline. 50 * 51 * The allocatable space when GC is running is "free" at the start of phase, but the 52 * accounted budget is based on "used". So, we need to adjust the tax knowing that. 53 */ 54 55 void ShenandoahPacer::setup_for_mark() { 56 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 57 58 size_t live = update_and_get_progress_history(); 59 size_t free = _heap->free_set()->available(); 60 61 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 62 size_t taxable = free - non_taxable; 63 64 double tax = 1.0 * live / taxable; // base tax for available free space 65 tax *= 1; // mark can succeed with immediate garbage, claim all available space 66 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 67 68 restart_with(non_taxable, tax); 69 70 log_info(gc, ergo)("Pacer for Mark. 
Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 71 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 72 byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), 73 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 74 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 75 tax); 76 } 77 78 void ShenandoahPacer::setup_for_evac() { 79 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 80 81 size_t used = _heap->collection_set()->used(); 82 size_t free = _heap->free_set()->available(); 83 84 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 85 size_t taxable = free - non_taxable; 86 87 double tax = 1.0 * used / taxable; // base tax for available free space 88 tax *= 2; // evac is followed by update-refs, claim 1/2 of remaining free 89 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 90 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 91 92 restart_with(non_taxable, tax); 93 94 log_info(gc, ergo)("Pacer for Evacuation. 
Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 95 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 96 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 97 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 98 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 99 tax); 100 } 101 102 void ShenandoahPacer::setup_for_updaterefs() { 103 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 104 105 size_t used = _heap->used(); 106 size_t free = _heap->free_set()->available(); 107 108 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 109 size_t taxable = free - non_taxable; 110 111 double tax = 1.0 * used / taxable; // base tax for available free space 112 tax *= 1; // update-refs is the last phase, claim the remaining free 113 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 114 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 115 116 restart_with(non_taxable, tax); 117 118 log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 119 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 120 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 121 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 122 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 123 tax); 124 } 125 126 /* 127 * In idle phase, we have to pace the application to let control thread react with GC start. 128 * 129 * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges 130 * it had seen recent allocations. It will naturally pace the allocations if control thread is 131 * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget 132 * for applications to allocate at. 
 */

void ShenandoahPacer::setup_for_idle() {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Initial budget is ShenandoahPacingIdleSlack percent of max heap capacity,
  // paid out at the neutral 1x tax rate.
  size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack;
  double tax = 1;

  restart_with(initial, tax);

  log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx",
                     byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial),
                     tax);
}

/*
 * There is no useful notion of progress for these operations. To avoid stalling
 * the allocators unnecessarily, allow them to run unimpeded.
 */

void ShenandoahPacer::setup_for_preclean() {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // The entire heap capacity is non-taxable: allocators run unimpeded.
  size_t initial = _heap->max_capacity();
  restart_with(initial, 1.0);

  log_info(gc, ergo)("Pacer for Precleaning. Non-Taxable: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial));
}

void ShenandoahPacer::setup_for_reset() {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // The entire heap capacity is non-taxable: allocators run unimpeded.
  size_t initial = _heap->max_capacity();
  restart_with(initial, 1.0);

  log_info(gc, ergo)("Pacer for Reset. Non-Taxable: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial));
}

// Folds the progress recorded for the last cycle into the history, resets the
// current progress counter, and returns the expected live data size in bytes.
// On the very first call there is no history yet, so report a prior of 10% of
// max heap capacity. Note that _progress accumulates heap words, hence the
// HeapWordSize scaling of the history average.
size_t ShenandoahPacer::update_and_get_progress_history() {
  if (_progress == -1) {
    // First initialization, report some prior
    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
    return (size_t) (_heap->max_capacity() * 0.1);
  } else {
    // Record history, and reply historical data
    _progress_history->add(_progress);
    Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
    return (size_t) (_progress_history->avg() * HeapWordSize);
  }
}

// Restarts pacing for a new phase: replaces (does not top up) the allocation
// budget with the non-taxable bytes converted to heap words, publishes the new
// tax rate, and bumps the epoch so that pacing tickets taken during the
// previous phase become stale (see unpace_for_alloc).
void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
  // Budget is maintained in heap words; tax_rate scales the non-taxable bytes.
  size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
  STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
  Atomic::xchg((intptr_t)initial, &_budget);
  Atomic::store(tax_rate, &_tax_rate);
  Atomic::inc(&_epoch);
}

// Tries to claim the pacing budget for an allocation of "words" heap words.
// The claimed amount is the allocation size scaled by the current tax rate
// (at least 1 word). Returns false when the budget is depleted, unless "force"
// is set, in which case the budget is charged anyway and may go negative.
bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));

  // CAS loop: atomically deduct the tax from the budget, retrying if another
  // thread updated the budget between our load and the exchange.
  intptr_t cur = 0;
  intptr_t new_val = 0;
  do {
    cur = Atomic::load(&_budget);
    if (cur < tax && !force) {
      // Progress depleted, alas.
      return false;
    }
    new_val = cur - tax;
  } while (Atomic::cmpxchg(new_val, &_budget, cur) != cur);
  return true;
}

// Returns a previously claimed budget for an allocation that did not happen
// after all. The ticket is only honored within the same pacing epoch it was
// taken in; a stale ticket would otherwise replenish another phase's budget.
void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  if (_epoch != epoch) {
    // Stale ticket, no need to unpace.
    return;
  }

  // Ticket is from the current epoch: give the claimed tax back to the budget.
  intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
  Atomic::add(tax, &_budget);
}

// Current pacing epoch; used to stamp claim/unpace tickets.
intptr_t ShenandoahPacer::epoch() {
  return Atomic::load(&_epoch);
}

// Paces the allocating thread: the fast path claims the budget outright;
// otherwise the thread backs off exponentially (bounded in total by
// ShenandoahPacingMaxDelay ms) waiting for GC to replenish the budget, and
// finally claims forcefully when its local time budget runs out.
void ShenandoahPacer::pace_for_alloc(size_t words) {
  assert(ShenandoahPacing, "Only be here when pacing is enabled");

  // Fast path: try to allocate right away
  if (claim_for_alloc(words, false)) {
    return;
  }

  // Threads that are attaching should not block at all: they are not
  // fully initialized yet. Blocking them would be awkward.
  // This is probably the path that allocates the thread oop itself.
  // Forcefully claim without waiting.
  if (JavaThread::current()->is_attaching_via_jni()) {
    claim_for_alloc(words, true);
    return;
  }

  size_t max = ShenandoahPacingMaxDelay;
  double start = os::elapsedTime();

  size_t total = 0;
  size_t cur = 0;

  while (true) {
    // We could instead assist GC, but this would suffice for now.
    // This code should also participate in safepointing.
    // Perform the exponential backoff, limited by max.

    // Backoff sequence is 1, 2, 4, ... ms ("cur" starts at zero; the MAX2
    // below makes the first wait 1 ms), truncated so "total" never overshoots
    // "max" by more than one wait.
    cur = cur * 2;
    if (total + cur > max) {
      cur = (max > total) ? (max - total) : 0;
    }
    cur = MAX2<size_t>(1, cur);

    wait(cur);

    // Re-read the wall clock: the actual wait may have been longer than
    // requested (descheduling, safepoint pauses).
    double end = os::elapsedTime();
    total = (size_t)((end - start) * 1000);

    if (total > max) {
      // Spent local time budget to wait for enough GC progress.
      // Breaking out and allocating anyway, which may mean we outpace GC,
      // and start Degenerated GC cycle.
      _delays.add(total);

      // Forcefully claim the budget: it may go negative at this point, and
      // GC should replenish for this and subsequent allocations
      claim_for_alloc(words, true);
      break;
    }

    if (claim_for_alloc(words, false)) {
      // Acquired enough permit, nice. Can allocate now.
      _delays.add(total);
      break;
    }
  }
}

void ShenandoahPacer::wait(size_t time_ms) {
  // Perform timed wait. It works like sleep(), except without modifying
  // the thread interruptible status. MonitorLocker also checks for safepoints.
  assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify");
  assert(time_ms <= LONG_MAX, "Sanity");
  MonitorLockerEx locker(_wait_monitor);
  _wait_monitor->wait(!Mutex::_no_safepoint_check_flag, (long)time_ms);
}

// Wakes up all threads currently blocked in wait(), e.g. when GC has made
// enough progress to replenish the budget.
void ShenandoahPacer::notify_waiters() {
  MonitorLockerEx locker(_wait_monitor);
  _wait_monitor->notify_all();
}

// Prints the pacing summary: the configured max delay and the histogram of
// the actually observed pacing delays.
void ShenandoahPacer::print_on(outputStream* out) const {
  out->print_cr("ALLOCATION PACING:");
  out->cr();

  out->print_cr("Max pacing delay is set for " UINTX_FORMAT " ms.", ShenandoahPacingMaxDelay);
  out->cr();

  out->print_cr("Higher delay would prevent application outpacing the GC, but it will hide the GC latencies");
  out->print_cr("from the STW pause times. Pacing affects the individual threads, and so it would also be");
  out->print_cr("invisible to the usual profiling tools, but would add up to end-to-end application latency.");
  out->print_cr("Raise max pacing delay with care.");
  out->cr();

  out->print_cr("Actual pacing delays histogram:");
  out->cr();

  out->print_cr("%10s - %10s %12s%12s", "From", "To", "Count", "Sum");

  size_t total_count = 0;
  size_t total_sum = 0;
  for (int c = _delays.min_level(); c <= _delays.max_level(); c++) {
    // Bucket c covers delays in [2^(c-1), 2^c) ms; the very first bucket
    // starts at 0 ms.
    int l = (c == 0) ? 0 : 1 << (c - 1);
    int r = 1 << c;
    size_t count = _delays.level(c);
    // Estimated sum of delays in this bucket.
    // NOTE(review): this is count * (half the bucket width); the per-sample
    // midpoint estimate would be l + (r - l) / 2 — confirm which is intended.
    size_t sum = count * (r - l) / 2;
    total_count += count;
    total_sum += sum;

    out->print_cr("%7d ms - %7d ms: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", l, r, count, sum);
  }
  out->print_cr("%23s: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", "Total", total_count, total_sum);
  out->cr();
  out->print_cr("Pacing delays are measured from entering the pacing code till exiting it. Therefore,");
  out->print_cr("observed pacing delays may be higher than the threshold when paced thread spent more");
  out->print_cr("time in the pacing code. It usually happens when thread is de-scheduled while paced,");
  out->print_cr("OS takes longer to unblock the thread, or JVM experiences an STW pause.");
  out->cr();
}