/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/vmThread.hpp"

ShenandoahConcurrentThread::ShenandoahConcurrentThread() :
  ConcurrentGCThread(),
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  _explicit_gc_waiters_lock(Mutex::leaf, "ShenandoahExplicitGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _explicit_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0)
{
  create_and_start();
  _periodic_task.enroll();
}

ShenandoahConcurrentThread::~ShenandoahConcurrentThread() {
  // This is here so that super is called.
}

void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}

void ShenandoahConcurrentThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x shorter than the delay means we hit the
  // shrinking with a lag of less than 1/10th of the true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;
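  // For example, an uncommit delay of 1000 ms gives a polling period of 0.1 s,
  // so a region becomes eligible for uncommit at most ~100 ms later than the
  // configured delay alone would suggest.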

  ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();

  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _explicit_gc.is_set();

    // This control loop iteration has seen this many allocations.
    size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with the default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && policy->should_degenerate_cycle()) {
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }

    } else if (explicit_gc_requested) {
      // Honor explicit GC requests
      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        if (policy->can_do_traversal_gc()) {
          mode = concurrent_traversal;
        } else {
          mode = concurrent_normal;
        }
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
      cause = _explicit_gc_cause;
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      ShenandoahHeap::GCCycleMode traversal_mode = policy->should_start_traversal_gc();
      if (traversal_mode != ShenandoahHeap::NONE) {
        mode = concurrent_traversal;
        cause = GCCause::_shenandoah_traversal_gc;
        heap->set_cycle_mode(traversal_mode);
      } else if (policy->should_start_normal_gc()) {
        mode = concurrent_normal;
        cause = GCCause::_shenandoah_concurrent_gc;
        heap->set_cycle_mode(ShenandoahHeap::MAJOR);
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(policy->should_process_references());
      heap->set_unload_classes(policy->should_unload_classes());
    }

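    // At this point, the precedence order above (allocation failure, then explicit
    // request, then heuristics) has selected at most one GC mode for this iteration.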
    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      heap->reset_bytes_allocated_since_gc_start();

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we had better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status_verbose();
      }
    }

    switch (mode) {
      case none:
        break;
      case concurrent_traversal:
        service_concurrent_traversal_cycle(cause);
        break;
      case concurrent_normal:
        service_concurrent_normal_cycle(cause);
        break;
      case stw_degenerated:
        service_stw_degenerated_cycle(cause, degen_point);
        break;
      case stw_full:
        service_stw_full_cycle(cause);
        break;
      default:
        ShouldNotReachHere();
    }

    heap->set_cycle_mode(ShenandoahHeap::NONE);

    if (gc_requested) {
      heap->set_used_at_last_gc();

      // If this was the explicit GC cycle, notify waiters about it
      if (explicit_gc_requested) {
        notify_explicit_gc_waiters();

        // Explicit GC tries to uncommit everything
        heap->handle_heap_shrinkage(os::elapsedTime());
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status_verbose();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Report the allocations seen during this idle iteration to the pacer
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    // Try to uncommit stale regions
    if (current - last_shrink_time > shrink_period) {
      heap->handle_heap_shrinkage(current - (ShenandoahUncommitDelay / 1000.0));
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}

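// Traversal GC performs marking, evacuation and reference updating as a single
// concurrent pass, bracketed by the init- and final-traversal safepoints below.
// Cancellation anywhere in that pass degenerates at the traversal point.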
void ShenandoahConcurrentThread::service_concurrent_traversal_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  bool is_minor = heap->is_minor_gc();
  TraceCollectorStats tcs(is_minor ? heap->monitoring_support()->partial_collection_counters()
                                   : heap->monitoring_support()->concurrent_collection_counters());

  heap->vmop_entry_init_traversal();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->entry_traversal();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return;

  heap->vmop_entry_final_traversal();

  heap->entry_cleanup_traversal();

  heap->shenandoahPolicy()->record_success_concurrent();
}

void ShenandoahConcurrentThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If a second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), cycle degrades to Full GC.
  //
  // The only current exception is allocation failure in Conc Evac: it goes straight to Full GC,
  // because we don't recover well from the case of incompletely evacuated heap in STW cycle.
  //
  // There are also two shortcuts through the normal cycle: a) immediate garbage shortcut, when
  // heuristics says there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions; b) coalesced UR shortcut, when heuristics decides to coalesce UR with the
  // mark from the next cycle.
  //
  // ................................................................................................
  //
  //                                     (immediate garbage shortcut)              Concurrent GC
  //                              /------------------------------------------\
  //                              |                    (coalesced UR)        v
  //                              |                  /---------------------->o
  //                              |                  |                       |
  //                              |                  |                       v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |          /---------/                 |              |      Degenerated GC
  //                   v          |                           v              |
  //               STW Mark ------+---> STW Evac ----> STW Update-Refs ----->o
  //                   |          |         |                 |              ^
  //                   | (af)     |         | (af)            | (af)         |
  // ..................|..........|.........|.................|..............|.......................
  //                   |          |         |                 |              |
  //                   |          v         v                 |              |      Full GC
  //                   \--------->o-------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                     Full GC ----------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  // Capture peak occupancy right after starting the cycle
  heap->shenandoahPolicy()->record_peak_occupancy();

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();
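  // Precleaning is expected to process some of the discovered references
  // concurrently, trimming the reference-processing work that would otherwise
  // be left for the final mark pause.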

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Final mark had reclaimed some immediate garbage, kick cleanup to reclaim the space
    // for the rest of the cycle.
    heap->entry_cleanup();

    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase, if required. This phase can be skipped if heuristics
    // decides to piggy-back the update-refs on the next marking cycle. On either path,
    // we need to turn off evacuation: either in init-update-refs, or in final-evac.
    if (heap->shenandoahPolicy()->should_start_update_refs()) {
      heap->vmop_entry_init_updaterefs();
      heap->entry_updaterefs();
      if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

      heap->vmop_entry_final_updaterefs();
    } else {
      heap->vmop_entry_final_evac();
    }
  }

  // Reclaim space and prepare for the next normal cycle:
  heap->entry_cleanup_bitmaps();

  // Cycle is complete
  heap->shenandoahPolicy()->record_success_concurrent();
}

bool ShenandoahConcurrentThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (heap->cancelled_concgc()) {
    assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
    if (!in_graceful_shutdown()) {
      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
      _degen_point = point;
    }
    return true;
  }
  return false;
}

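// Shutdown is coordinated through prepare_for_graceful_shutdown() and the
// terminate flag, both of which are polled by the loop in run_service(),
// so stop_service() itself has no work to do.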
void ShenandoahConcurrentThread::stop_service() {
  // Nothing to do here.
}

void ShenandoahConcurrentThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->shenandoahPolicy()->record_success_full();
}

void ShenandoahConcurrentThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->shenandoahPolicy()->record_success_degenerated();
}

void ShenandoahConcurrentThread::handle_explicit_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) || GCCause::is_serviceability_requested_gc(cause),
         "only requested GCs here");
  if (!DisableExplicitGC) {
    _explicit_gc_cause = cause;

    _explicit_gc.set();
    MonitorLockerEx ml(&_explicit_gc_waiters_lock);
    while (_explicit_gc.is_set()) {
      ml.wait();
    }
  }
}

void ShenandoahConcurrentThread::handle_alloc_failure(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "K", words * HeapWordSize / K);

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_concgc(GCCause::_allocation_failure);
  }

  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
  assert(!is_alloc_failure_gc(), "expect alloc failure GC to have completed");
}

void ShenandoahConcurrentThread::handle_alloc_failure_evac(size_t words) {
  log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule GC by thread %d",
                        Thread::current()->osthread()->thread_id());

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "K for evacuation", words * HeapWordSize / K);
  }

  // Forcefully report allocation failure
  heap->cancel_concgc(GCCause::_shenandoah_allocation_failure_evac);
}

void ShenandoahConcurrentThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLockerEx ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}

bool ShenandoahConcurrentThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}

bool ShenandoahConcurrentThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}

void ShenandoahConcurrentThread::notify_explicit_gc_waiters() {
  _explicit_gc.unset();
  MonitorLockerEx ml(&_explicit_gc_waiters_lock);
  ml.notify_all();
}

void ShenandoahConcurrentThread::handle_counters_update() {
  if (_do_counters_update.is_set()) {
    _do_counters_update.unset();
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

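// Unlike the lazy update above, the forced variant runs while a GC cycle is in
// progress: run_service() raises _force_counters_update for the duration of the
// cycle, and the periodic task then refreshes the counters on every tick.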
void ShenandoahConcurrentThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}

void ShenandoahConcurrentThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something has changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}

void ShenandoahConcurrentThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(words, &_allocs_seen);
}

void ShenandoahConcurrentThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}

void ShenandoahConcurrentThread::print() const {
  print_on(tty);
}

void ShenandoahConcurrentThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}

void ShenandoahConcurrentThread::start() {
  create_and_start();
}

void ShenandoahConcurrentThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}

bool ShenandoahConcurrentThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}