/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"

/*
 * In normal concurrent cycle, we have to pace the application to let GC finish.
 *
 * Here, we do not know how large would be the collection set, and what are the
 * relative performances of the each stage in the concurrent cycle, and so we have to
 * make some assumptions.
 *
 * For concurrent mark, there is no clear notion of progress. The moderately accurate
 * and easy to get metric is the amount of live objects the mark had encountered. But,
 * that does not directly correlate with the used heap, because the heap might be fully
 * dead or fully alive.
We cannot assume either of the extremes: we would either allow 41 * application to run out of memory if we assume heap is fully dead but it is not, and, 42 * conversely, we would pacify application excessively if we assume heap is fully alive 43 * but it is not. So we need to guesstimate the particular expected value for heap liveness. 44 * The best way to do this is apparently recording the past history. 45 * 46 * For concurrent evac and update-refs, we are walking the heap per-region, and so the 47 * notion of progress is clear: we get reported the "used" size from the processed regions 48 * and use the global heap-used as the baseline. 49 * 50 * The allocatable space when GC is running is "free" at the start of cycle, but the 51 * accounted budget is based on "used". So, we need to adjust the tax knowing that. 52 * Also, since we effectively count the used space three times (mark, evac, update-refs), 53 * we need to multiply the tax by 3. Example: for 10 MB free and 90 MB used, GC would 54 * come back with 3*90 MB budget, and thus for each 1 MB of allocation, we have to pay 55 * 3*90 / 10 MBs. In the end, we would pay back the entire budget. 56 */ 57 58 void ShenandoahPacer::setup_for_mark() { 59 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 60 61 size_t live = update_and_get_progress_history(); 62 size_t free = _heap->free_set()->available(); 63 64 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 65 size_t taxable = free - non_taxable; 66 67 double tax = 1.0 * live / taxable; // base tax for available free space 68 tax *= 3; // mark is phase 1 of 3, claim 1/3 of free for it 69 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 70 71 restart_with(non_taxable, tax); 72 73 log_info(gc, ergo)("Pacer for Mark. 
Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 74 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 75 byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), 76 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 77 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 78 tax); 79 } 80 81 void ShenandoahPacer::setup_for_evac() { 82 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 83 84 size_t used = _heap->collection_set()->used(); 85 size_t free = _heap->free_set()->available(); 86 87 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 88 size_t taxable = free - non_taxable; 89 90 double tax = 1.0 * used / taxable; // base tax for available free space 91 tax *= 2; // evac is phase 2 of 3, claim 1/2 of remaining free 92 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 93 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 94 95 restart_with(non_taxable, tax); 96 97 log_info(gc, ergo)("Pacer for Evacuation. 
Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 98 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 99 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 100 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 101 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 102 tax); 103 } 104 105 void ShenandoahPacer::setup_for_updaterefs() { 106 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 107 108 size_t used = _heap->used(); 109 size_t free = _heap->free_set()->available(); 110 111 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 112 size_t taxable = free - non_taxable; 113 114 double tax = 1.0 * used / taxable; // base tax for available free space 115 tax *= 1; // update-refs is phase 3 of 3, claim the remaining free 116 tax = MAX2<double>(1, tax); // never allocate more than GC processes during the phase 117 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 118 119 restart_with(non_taxable, tax); 120 121 log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 122 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 123 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), 124 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 125 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 126 tax); 127 } 128 129 /* 130 * Traversal walks the entire heap once, and therefore we have to make assumptions about its 131 * liveness, like concurrent mark does. 
132 */ 133 134 void ShenandoahPacer::setup_for_traversal() { 135 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 136 137 size_t live = update_and_get_progress_history(); 138 size_t free = _heap->free_set()->available(); 139 140 size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; 141 size_t taxable = free - non_taxable; 142 143 double tax = 1.0 * live / taxable; // base tax for available free space 144 tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap 145 146 restart_with(non_taxable, tax); 147 148 log_info(gc, ergo)("Pacer for Traversal. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " 149 "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 150 byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), 151 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), 152 byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), 153 tax); 154 } 155 156 /* 157 * In idle phase, we have to pace the application to let control thread react with GC start. 158 * 159 * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges 160 * it had seen recent allocations. It will naturally pace the allocations if control thread is 161 * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget 162 * for applications to allocate at. 163 */ 164 165 void ShenandoahPacer::setup_for_idle() { 166 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 167 168 size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack; 169 double tax = 1; 170 171 restart_with(initial, tax); 172 173 log_info(gc, ergo)("Pacer for Idle. 
Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", 174 byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial), 175 tax); 176 } 177 178 size_t ShenandoahPacer::update_and_get_progress_history() { 179 if (_progress == -1) { 180 // First initialization, report some prior 181 Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress); 182 return (size_t) (_heap->max_capacity() * 0.1); 183 } else { 184 // Record history, and reply historical data 185 _progress_history->add(_progress); 186 Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress); 187 return (size_t) (_progress_history->avg() * HeapWordSize); 188 } 189 } 190 191 void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) { 192 size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize; 193 STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); 194 Atomic::xchg((intptr_t)initial, &_budget); 195 Atomic::store(tax_rate, &_tax_rate); 196 Atomic::inc(&_epoch); 197 } 198 199 bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) { 200 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 201 202 intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate)); 203 204 intptr_t cur = 0; 205 intptr_t new_val = 0; 206 do { 207 cur = Atomic::load(&_budget); 208 if (cur < tax && !force) { 209 // Progress depleted, alas. 210 return false; 211 } 212 new_val = cur - tax; 213 } while (Atomic::cmpxchg(new_val, &_budget, cur) != cur); 214 return true; 215 } 216 217 void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) { 218 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 219 220 if (_epoch != epoch) { 221 // Stale ticket, no need to unpace. 
222 return; 223 } 224 225 intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate)); 226 Atomic::add(tax, &_budget); 227 } 228 229 intptr_t ShenandoahPacer::epoch() { 230 return Atomic::load(&_epoch); 231 } 232 233 void ShenandoahPacer::pace_for_alloc(size_t words) { 234 assert(ShenandoahPacing, "Only be here when pacing is enabled"); 235 236 // Fast path: try to allocate right away 237 if (claim_for_alloc(words, false)) { 238 return; 239 } 240 241 // Threads that are attaching should not block at all: they are not 242 // fully initialized yet. Calling sleep() on them would be awkward. 243 // This is probably the path that allocates the thread oop itself. 244 // Forcefully claim without waiting. 245 if (JavaThread::current()->is_attaching_via_jni()) { 246 claim_for_alloc(words, true); 247 return; 248 } 249 250 size_t max = ShenandoahPacingMaxDelay; 251 double start = os::elapsedTime(); 252 253 size_t total = 0; 254 size_t cur = 0; 255 256 while (true) { 257 // We could instead assist GC, but this would suffice for now. 258 // This code should also participate in safepointing. 259 // Perform the exponential backoff, limited by max. 260 261 cur = cur * 2; 262 if (total + cur > max) { 263 cur = (max > total) ? (max - total) : 0; 264 } 265 cur = MAX2<size_t>(1, cur); 266 267 JavaThread::current()->sleep(cur); 268 269 double end = os::elapsedTime(); 270 total = (size_t)((end - start) * 1000); 271 272 if (total > max) { 273 // Spent local time budget to wait for enough GC progress. 274 // Breaking out and allocating anyway, which may mean we outpace GC, 275 // and start Degenerated GC cycle. 276 _delays.add(total); 277 278 // Forcefully claim the budget: it may go negative at this point, and 279 // GC should replenish for this and subsequent allocations 280 claim_for_alloc(words, true); 281 break; 282 } 283 284 if (claim_for_alloc(words, false)) { 285 // Acquired enough permit, nice. Can allocate now. 
286 _delays.add(total); 287 break; 288 } 289 } 290 } 291 292 void ShenandoahPacer::print_on(outputStream* out) const { 293 out->print_cr("ALLOCATION PACING:"); 294 out->cr(); 295 296 out->print_cr("Max pacing delay is set for " UINTX_FORMAT " ms.", ShenandoahPacingMaxDelay); 297 out->cr(); 298 299 out->print_cr("Higher delay would prevent application outpacing the GC, but it will hide the GC latencies"); 300 out->print_cr("from the STW pause times. Pacing affects the individual threads, and so it would also be"); 301 out->print_cr("invisible to the usual profiling tools, but would add up to end-to-end application latency."); 302 out->print_cr("Raise max pacing delay with care."); 303 out->cr(); 304 305 out->print_cr("Actual pacing delays histogram:"); 306 out->cr(); 307 308 out->print_cr("%10s - %10s %12s%12s", "From", "To", "Count", "Sum"); 309 310 size_t total_count = 0; 311 size_t total_sum = 0; 312 for (int c = _delays.min_level(); c <= _delays.max_level(); c++) { 313 int l = (c == 0) ? 0 : 1 << (c - 1); 314 int r = 1 << c; 315 size_t count = _delays.level(c); 316 size_t sum = count * (r - l) / 2; 317 total_count += count; 318 total_sum += sum; 319 320 out->print_cr("%7d ms - %7d ms: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", l, r, count, sum); 321 } 322 out->print_cr("%23s: " SIZE_FORMAT_W(12) SIZE_FORMAT_W(12) " ms", "Total", total_count, total_sum); 323 out->cr(); 324 out->print_cr("Pacing delays are measured from entering the pacing code till exiting it. Therefore,"); 325 out->print_cr("observed pacing delays may be higher than the threshold when paced thread spent more"); 326 out->print_cr("time in the pacing code. It usually happens when thread is de-scheduled while paced,"); 327 out->print_cr("OS takes longer to unblock the thread, or JVM experiences an STW pause."); 328 out->cr(); 329 }