< prev index next >

src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp

Print this page
rev 11461 : [backport] 8226757: Shenandoah: Make traversal and passive modes explicit
rev 11463 : Backport Traversal GC


  34 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
  38 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  39 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  41 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
  42 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
  43 #include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
  44 #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
  47 #include "gc_implementation/shenandoah/shenandoahMetrics.hpp"
  48 #include "gc_implementation/shenandoah/shenandoahNormalMode.hpp"
  49 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
  50 #include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp"
  51 #include "gc_implementation/shenandoah/shenandoahPassiveMode.hpp"
  52 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  53 #include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"

  54 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  55 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
  56 #include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp"
  57 #include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
  58 #include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
  59 #include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
  60 
  61 #include "memory/metaspace.hpp"
  62 #include "runtime/vmThread.hpp"
  63 #include "services/mallocTracker.hpp"
  64 
  65 #ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  // Debug-only (ASSERT) closure step: load the (possibly narrow) oop at *p
  // and, if non-NULL, assert that the referent has not been forwarded —
  // i.e. *p already points at the canonical copy.
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}


 340                                                SATB_Q_FL_lock,
 341                                                20 /*G1SATBProcessCompletedThreshold */,
 342                                                Shared_SATB_Q_lock);
 343 
 344   _monitoring_support = new ShenandoahMonitoringSupport(this);
 345   _phase_timings = new ShenandoahPhaseTimings();
 346   ShenandoahStringDedup::initialize();
 347   ShenandoahCodeRoots::initialize();
 348 
 349   if (ShenandoahAllocationTrace) {
 350     _alloc_tracker = new ShenandoahAllocTracker();
 351   }
 352 
 353   if (ShenandoahPacing) {
 354     _pacer = new ShenandoahPacer(this);
 355     _pacer->setup_for_idle();
 356   } else {
 357     _pacer = NULL;
 358   }
 359 




 360   _control_thread = new ShenandoahControlThread();
 361 
 362   log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
 363                      byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
 364                      byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
 365                      byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
 366   );
 367 
 368   return JNI_OK;
 369 }
 370 
 371 #ifdef _MSC_VER
 372 #pragma warning( push )
 373 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 374 #endif
 375 
// Select the GC mode from -XX:ShenandoahGCMode, let the mode adjust flag
// defaults, and construct the matching heuristics object. Exits the VM on an
// unknown mode name, or when a diagnostic/experimental heuristics is chosen
// without the corresponding unlock flag.
void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "normal") == 0) {
      _gc_mode = new ShenandoahNormalMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    // The flag is expected to always have a (default) value; NULL is a bug.
    ShouldNotReachHere();
  }
  _gc_mode->initialize_flags();
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
  log_info(gc, init)("Shenandoah heuristics: %s",
                     _heuristics->name());
}
 404 
// Constructor: NULL/zero-initializes most subsystems (they are created later,
// presumably in initialize()/initialize_heuristics() — only partially visible
// here), and eagerly creates the concurrent marker, the full-GC driver and
// the worker gang.
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  SharedHeap(policy),
  _shenandoah_policy(policy),
  _heap_region_special(false),
  _regions(NULL),
  _free_set(NULL),
  _collection_set(NULL),
  _update_refs_iterator(this),
  _bytes_allocated_since_gc_start(0),
  _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _aux_bit_map(),
  _verifier(NULL),
  _pacer(NULL),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL)
{
  log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
  log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");

  _scm = new ShenandoahConcurrentMark();

  _full_gc = new ShenandoahMarkCompact();
  _used = 0;

  // Guarantee at least one worker even if both thread flags compute to zero.
  _max_workers = MAX2(_max_workers, 1U);

  // SharedHeap did not initialize this for us, and we want our own workgang anyway.
  assert(SharedHeap::_workers == NULL && _workers == NULL, "Should not be initialized yet");
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
  assert(SharedHeap::_workers == _workers, "Sanity: initialized the correct field");
}
 451 
 452 #ifdef _MSC_VER
 453 #pragma warning( pop )


 481   ShenandoahResetBitmapTask task;
 482   _workers->run_task(&task);
 483 }
 484 
 485 void ShenandoahHeap::print_on(outputStream* st) const {
 486   st->print_cr("Shenandoah Heap");
 487   st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 488                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 489                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 490                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 491   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 492                num_regions(),
 493                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 494                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 495 
 496   st->print("Status: ");
 497   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 498   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 499   if (is_evacuation_in_progress())           st->print("evacuating, ");
 500   if (is_update_refs_in_progress())          st->print("updating refs, ");

 501   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 502   if (is_full_gc_in_progress())              st->print("full gc, ");
 503   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 504 
 505   if (cancelled_gc()) {
 506     st->print("cancelled");
 507   } else {
 508     st->print("not cancelled");
 509   }
 510   st->cr();
 511 
 512   st->print_cr("Reserved region:");
 513   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 514                p2i(reserved_region().start()),
 515                p2i(reserved_region().end()));
 516 
 517   ShenandoahCollectionSet* cset = collection_set();
 518   st->print_cr("Collection set:");
 519   if (cset != NULL) {
 520     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));


1563   ShenandoahEvacuationTask task(this, _collection_set, false);
1564   workers()->run_task(&task);
1565 }
1566 
// Thin GC-operation entry points: each op_* dispatches to the subsystem
// that implements the corresponding phase.

void ShenandoahHeap::op_updaterefs() {
  // NOTE(review): the boolean argument's meaning is defined by
  // update_heap_references(), which is not visible here — confirm there.
  update_heap_references(true);
}

void ShenandoahHeap::op_cleanup() {
  // Recycle regions the free set considers trash.
  free_set()->recycle_trash();
}

void ShenandoahHeap::op_reset() {
  // Reset the marking bitmap ahead of a new marking cycle.
  reset_mark_bitmap();
}

void ShenandoahHeap::op_preclean() {
  // Preclean discovered weak references during concurrent marking.
  concurrent_mark()->preclean_weak_refs();
}
1582 












// Run a Full GC for the given cause, bracketing it with metric snapshots so
// the allocation path can learn whether the collection made good progress.
void ShenandoahHeap::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  full_gc()->do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    _progress_last_gc.set();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    _progress_last_gc.unset();
  }
}
1599 
1600 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1601   // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1602   // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1603   // some phase, we have to upgrade the Degenerate GC to Full GC.
1604 
1605   clear_cancelled_gc();
1606 
1607   ShenandoahMetricsSnapshot metrics;
1608   metrics.snap_before();
1609 
1610   switch (point) {


















1611     // The cases below form the Duff's-like device: it describes the actual GC cycle,
1612     // but enters it at different points, depending on which concurrent phase had
1613     // degenerated.
1614 
1615     case _degenerated_outside_cycle:
1616       // We have degenerated from outside the cycle, which means something is bad with
1617       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1618       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1619       // we can do the most aggressive degen cycle, which includes processing references and
1620       // class unloading, unless those features are explicitly disabled.
1621       //
1622       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1623       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1624       set_process_references(heuristics()->can_process_references());
1625       set_unload_classes(heuristics()->can_unload_classes());
1626 







1627       op_reset();
1628 
1629       op_init_mark();
1630       if (cancelled_gc()) {
1631         op_degenerated_fail();
1632         return;
1633       }
1634 
1635     case _degenerated_mark:
1636       op_final_mark();
1637       if (cancelled_gc()) {
1638         op_degenerated_fail();
1639         return;
1640       }
1641 
1642       op_cleanup();
1643 
1644     case _degenerated_evac:
1645       // If heuristics thinks we should do the cycle, this flag would be set,
1646       // and we can do evacuation. Otherwise, it would be the shortcut cycle.


1736   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1737 }
1738 
// Degenerated GC turned out futile: record the upgrade in the policy
// statistics and escalate to a Full GC.
void ShenandoahHeap::op_degenerated_futile() {
  shenandoah_policy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}
1743 
// Finish the concurrent marking phase and drop the MARKING state bit.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(is_concurrent_mark_in_progress(), "How else could we get here?");
  if (!cancelled_gc()) {
    // Marking completed normally: publish the now-complete marking context
    // and clear the has-forwarded-objects flag.
    // NOTE(review): on the cancelled path these are deliberately left as-is —
    // presumably so a later phase can still finish updating references;
    // confirm against the callers of this method.
    set_has_forwarded_objects(false);
    mark_complete_marking_context();
  }
  set_concurrent_mark_in_progress(false);
}
1754 
// Request that all Java threads flush their SATB buffers. Only meaningful
// while concurrent marking is active; otherwise returns immediately.
void ShenandoahHeap::force_satb_flush_all_threads() {
  if (!is_concurrent_mark_in_progress()) {
    // No need to flush SATBs
    return;
  }

  // Do not block if Threads lock is busy. This avoids the potential deadlock
  // when this code is called from the periodic task, and something else is
  // expecting the periodic task to complete without blocking. On the off-chance
  // Threads lock is busy momentarily, try to acquire several times.
  for (int t = 0; t < 10; t++) {
    if (Threads_lock->try_lock()) {
      JavaThread::set_force_satb_flush_all_threads(true);
      Threads_lock->unlock();

      // The threads are not "acquiring" their thread-local data, but it does not
      // hurt to "release" the updates here anyway.
      OrderAccess::fence();
      break;
    }
    // Lock was busy; back off briefly before retrying. If all 10 attempts
    // fail, the flush request is silently skipped (best-effort semantics).
    os::naked_short_sleep(1);
  }
}
1778 
// Set or clear the given bits in the global GC state and propagate the new
// raw value to all Java threads. Must run at a Shenandoah safepoint so the
// per-thread copies cannot be observed mid-update.
void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  JavaThread::set_gc_state_all_threads(_gc_state.raw_value());
}
1784 
// Flip the MARKING gc-state bit and (de)activate the SATB mark queues on all
// threads to match: active while marking, drained/inactive otherwise.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  set_gc_state_mask(MARKING, in_progress);
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1789 





// Flip the EVACUATION gc-state bit; only allowed at a Shenandoah safepoint.
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state_mask(EVACUATION, in_progress);
}
1794 
1795 void ShenandoahHeap::ref_processing_init() {
1796   MemRegion mr = reserved_region();
1797 
1798   assert(_max_workers > 0, "Sanity");
1799 
1800   _ref_processor =
1801     new ReferenceProcessor(mr,    // span
1802                            ParallelRefProcEnabled,  // MT processing
1803                            _max_workers,            // Degree of MT processing
1804                            true,                    // MT discovery
1805                            _max_workers,            // Degree of MT discovery
1806                            false,                   // Reference discovery is not atomic
1807                            NULL);                   // No closure, should be installed before use
1808 
1809   shenandoah_assert_rp_isalive_not_installed();


// Schedule the Init Update Refs pause on the VM thread. The "gross" phase
// timers are started here, before the VM operation executes (per the *_gross
// naming, presumably to include time-to-safepoint — confirm in timings code).
void ShenandoahHeap::vmop_entry_init_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);

  // Test hook: may cancel the GC before the pause is entered.
  try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op;
  VMThread::execute(&op);
}
2302 
// Schedule the Final Update Refs pause on the VM thread, accounted under the
// "gross" timers started before the VM operation executes.
void ShenandoahHeap::vmop_entry_final_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);

  // Test hook: may cancel the GC before the pause is entered.
  try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op;
  VMThread::execute(&op);
}
2312 




















// Schedule a Full GC pause on the VM thread for the given cause, accounted
// under the full-GC "gross" timers and the full-STW collection counters.
void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);

  // Test hook: may cancel the GC before the pause is entered.
  try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause);
  VMThread::execute(&op);
}
2322 
// Schedule a Degenerated GC pause on the VM thread, entering the cycle at the
// given degeneration point. Unlike the other vmop_* entries, there is no
// try_inject_alloc_failure() call here — degenerated GC is already the
// failure-handling path (NOTE(review): presumed intent; confirm upstream).
void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);

  VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
  VMThread::execute(&degenerated_gc);
}
2331 
2332 void ShenandoahHeap::entry_init_mark() {


2381   // No workers used in this phase, no setup required
2382 
2383   op_init_updaterefs();
2384 }
2385 
// Body of the Final Update Refs pause (presumably invoked from the VM
// operation — see vmop_entry_final_updaterefs): sets up net phase timing,
// GC tracing, and the worker gang size, then performs the work.
void ShenandoahHeap::entry_final_updaterefs() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);

  static const char* msg = "Pause Final Update Refs";
  GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
  EventMark em("%s", msg);

  // Size the worker gang per policy for reference updating.
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}
2400 






























// Body of the Full GC pause: sets up net phase timing, GC tracing, and the
// worker gang size, then runs the full collection for the given cause.
void ShenandoahHeap::entry_full(GCCause::Cause cause) {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);

  static const char* msg = "Pause Full";
  GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
  EventMark em("%s", msg);

  // Size the worker gang per policy for full GC.
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}
2415 
2416 void ShenandoahHeap::entry_degenerated(int point) {
2417   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2418   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2419 
2420   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;


2506 }
2507 
// Concurrent precleaning entry: runs only when precleaning is enabled and
// the current cycle processes references; otherwise a no-op.
void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);

    static const char* msg = "Concurrent precleaning";
    GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
    EventMark em("%s", msg);

    // Worker count is requested but deliberately not verified here
    // (check_workers = false).
    ShenandoahWorkerScope scope(workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
                                "concurrent preclean",
                                /* check_workers = */ false);

    // Test hook: may cancel the GC before precleaning starts.
    try_inject_alloc_failure();
    op_preclean();
  }
}
2525 















// Concurrent uncommit entry: timing/trace wrapper around op_uncommit().
// NOTE(review): the semantics of shrink_before are defined by op_uncommit,
// which is not visible here.
void ShenandoahHeap::entry_uncommit(double shrink_before) {
  static const char *msg = "Concurrent uncommit";
  GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);

  op_uncommit(shrink_before);
}
2535 
2536 void ShenandoahHeap::try_inject_alloc_failure() {
2537   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2538     _inject_alloc_failure.set();
2539     os::naked_short_sleep(1);
2540     if (cancelled_gc()) {
2541       log_info(gc)("Allocation failure was successfully injected");
2542     }
2543   }
2544 }
2545 


2634     return "Concurrent marking (update refs) (process weakrefs)";
2635   } else if (update_refs && unload_cls) {
2636     return "Concurrent marking (update refs) (unload classes)";
2637   } else if (proc_refs && unload_cls) {
2638     return "Concurrent marking (process weakrefs) (unload classes)";
2639   } else if (update_refs) {
2640     return "Concurrent marking (update refs)";
2641   } else if (proc_refs) {
2642     return "Concurrent marking (process weakrefs)";
2643   } else if (unload_cls) {
2644     return "Concurrent marking (unload classes)";
2645   } else {
2646     return "Concurrent marking";
2647   }
2648 }
2649 
2650 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2651   switch (point) {
2652     case _degenerated_unset:
2653       return "Pause Degenerated GC (<UNSET>)";


2654     case _degenerated_outside_cycle:
2655       return "Pause Degenerated GC (Outside of Cycle)";
2656     case _degenerated_mark:
2657       return "Pause Degenerated GC (Mark)";
2658     case _degenerated_evac:
2659       return "Pause Degenerated GC (Evacuation)";
2660     case _degenerated_updaterefs:
2661       return "Pause Degenerated GC (Update Refs)";
2662     default:
2663       ShouldNotReachHere();
2664       return "ERROR";
2665   }
2666 }
2667 
2668 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2669 #ifdef ASSERT
2670   assert(_liveness_cache != NULL, "sanity");
2671   assert(worker_id < _max_workers, "sanity");
2672   for (uint i = 0; i < num_regions(); i++) {
2673     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");


  34 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
  38 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  39 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  41 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
  42 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
  43 #include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
  44 #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
  47 #include "gc_implementation/shenandoah/shenandoahMetrics.hpp"
  48 #include "gc_implementation/shenandoah/shenandoahNormalMode.hpp"
  49 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
  50 #include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp"
  51 #include "gc_implementation/shenandoah/shenandoahPassiveMode.hpp"
  52 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  53 #include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"
  54 #include "gc_implementation/shenandoah/shenandoahTraversalMode.hpp"
  55 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  56 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
  57 #include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp"
  58 #include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
  59 #include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
  60 #include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
  61 
  62 #include "memory/metaspace.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "services/mallocTracker.hpp"
  65 
  66 #ifdef ASSERT
  67 template <class T>
  68 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  69   T o = oopDesc::load_heap_oop(p);
  70   if (! oopDesc::is_null(o)) {
  71     oop obj = oopDesc::decode_heap_oop_not_null(o);
  72     shenandoah_assert_not_forwarded(p, obj);
  73   }
  74 }


 341                                                SATB_Q_FL_lock,
 342                                                20 /*G1SATBProcessCompletedThreshold */,
 343                                                Shared_SATB_Q_lock);
 344 
 345   _monitoring_support = new ShenandoahMonitoringSupport(this);
 346   _phase_timings = new ShenandoahPhaseTimings();
 347   ShenandoahStringDedup::initialize();
 348   ShenandoahCodeRoots::initialize();
 349 
 350   if (ShenandoahAllocationTrace) {
 351     _alloc_tracker = new ShenandoahAllocTracker();
 352   }
 353 
 354   if (ShenandoahPacing) {
 355     _pacer = new ShenandoahPacer(this);
 356     _pacer->setup_for_idle();
 357   } else {
 358     _pacer = NULL;
 359   }
 360 
 361   _traversal_gc = strcmp(ShenandoahGCMode, "traversal") == 0 ?
 362                   new ShenandoahTraversalGC(this, _num_regions) :
 363                   NULL;
 364 
 365   _control_thread = new ShenandoahControlThread();
 366 
 367   log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
 368                      byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
 369                      byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
 370                      byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
 371   );
 372 
 373   return JNI_OK;
 374 }
 375 
 376 #ifdef _MSC_VER
 377 #pragma warning( push )
 378 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 379 #endif
 380 
// Select the GC mode from -XX:ShenandoahGCMode ("traversal", "normal" or
// "passive"), let the mode adjust flag defaults, and construct the matching
// heuristics object. Exits the VM on an unknown mode name, or when a
// diagnostic/experimental heuristics is chosen without the unlock flag.
void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "traversal") == 0) {
      _gc_mode = new ShenandoahTraversalMode();
    } else if (strcmp(ShenandoahGCMode, "normal") == 0) {
      _gc_mode = new ShenandoahNormalMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    // The flag is expected to always have a (default) value; NULL is a bug.
    ShouldNotReachHere();
  }
  _gc_mode->initialize_flags();
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
  log_info(gc, init)("Shenandoah heuristics: %s",
                     _heuristics->name());
}
 411 
// Constructor: NULL/zero-initializes most subsystems, including the traversal
// GC driver (created later when the traversal mode is selected — see the
// initialization code), and eagerly creates the concurrent marker, the
// full-GC driver and the worker gang.
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  SharedHeap(policy),
  _shenandoah_policy(policy),
  _heap_region_special(false),
  _regions(NULL),
  _free_set(NULL),
  _collection_set(NULL),
  _traversal_gc(NULL),
  _update_refs_iterator(this),
  _bytes_allocated_since_gc_start(0),
  _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _aux_bit_map(),
  _verifier(NULL),
  _pacer(NULL),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL)
{
  log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
  log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");

  _scm = new ShenandoahConcurrentMark();

  _full_gc = new ShenandoahMarkCompact();
  _used = 0;

  // Guarantee at least one worker even if both thread flags compute to zero.
  _max_workers = MAX2(_max_workers, 1U);

  // SharedHeap did not initialize this for us, and we want our own workgang anyway.
  assert(SharedHeap::_workers == NULL && _workers == NULL, "Should not be initialized yet");
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
  assert(SharedHeap::_workers == _workers, "Sanity: initialized the correct field");
}
 460 
 461 #ifdef _MSC_VER
 462 #pragma warning( pop )


 490   ShenandoahResetBitmapTask task;
 491   _workers->run_task(&task);
 492 }
 493 
 494 void ShenandoahHeap::print_on(outputStream* st) const {
 495   st->print_cr("Shenandoah Heap");
 496   st->print_cr(" " SIZE_FORMAT "%s total, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
 497                byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
 498                byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
 499                byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
 500   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
 501                num_regions(),
 502                byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
 503                proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
 504 
 505   st->print("Status: ");
 506   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 507   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 508   if (is_evacuation_in_progress())           st->print("evacuating, ");
 509   if (is_update_refs_in_progress())          st->print("updating refs, ");
 510   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 511   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 512   if (is_full_gc_in_progress())              st->print("full gc, ");
 513   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 514 
 515   if (cancelled_gc()) {
 516     st->print("cancelled");
 517   } else {
 518     st->print("not cancelled");
 519   }
 520   st->cr();
 521 
 522   st->print_cr("Reserved region:");
 523   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 524                p2i(reserved_region().start()),
 525                p2i(reserved_region().end()));
 526 
 527   ShenandoahCollectionSet* cset = collection_set();
 528   st->print_cr("Collection set:");
 529   if (cset != NULL) {
 530     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));


1573   ShenandoahEvacuationTask task(this, _collection_set, false);
1574   workers()->run_task(&task);
1575 }
1576 
1577 void ShenandoahHeap::op_updaterefs() {
1578   update_heap_references(true);
1579 }
1580 
1581 void ShenandoahHeap::op_cleanup() {
1582   free_set()->recycle_trash();
1583 }
1584 
1585 void ShenandoahHeap::op_reset() {
1586   reset_mark_bitmap();
1587 }
1588 
// Concurrent precleaning of discovered weak references, delegated to the
// concurrent marker. Only entered from entry_preclean() when ShenandoahPreclean
// and reference processing are enabled.
void ShenandoahHeap::op_preclean() {
  concurrent_mark()->preclean_weak_refs();
}
1592 
// Init Traversal pause work: delegated to the traversal collector.
void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}
1596 
// Concurrent traversal phase work: delegated to the traversal collector.
void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}
1600 
// Final Traversal pause work: delegated to the traversal collector.
void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}
1604 
1605 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1606   ShenandoahMetricsSnapshot metrics;
1607   metrics.snap_before();
1608 
1609   full_gc()->do_it(cause);
1610 
1611   metrics.snap_after();
1612 
1613   if (metrics.is_good_progress()) {
1614     _progress_last_gc.set();
1615   } else {
1616     // Nothing to do. Tell the allocation path that we have failed to make
1617     // progress, and it can finally fail.
1618     _progress_last_gc.unset();
1619   }
1620 }
1621 
1622 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1623   // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1624   // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1625   // some phase, we have to upgrade the Degenerate GC to Full GC.
1626 
1627   clear_cancelled_gc();
1628 
1629   ShenandoahMetricsSnapshot metrics;
1630   metrics.snap_before();
1631 
1632   switch (point) {
1633     case _degenerated_traversal:
1634       {
1635         // Drop the collection set. Note: this leaves some already forwarded objects
1636         // behind, which may be problematic, see comments for ShenandoahEvacAssist
1637         // workarounds in ShenandoahTraversalHeuristics.
1638 
1639         ShenandoahHeapLocker locker(lock());
1640         collection_set()->clear_current_index();
1641         for (size_t i = 0; i < collection_set()->count(); i++) {
1642           ShenandoahHeapRegion* r = collection_set()->next();
1643           r->make_regular_bypass();
1644         }
1645         collection_set()->clear();
1646       }
1647       op_final_traversal();
1648       op_cleanup();
1649       return;
1650 
1651     // The cases below form the Duff's-like device: it describes the actual GC cycle,
1652     // but enters it at different points, depending on which concurrent phase had
1653     // degenerated.
1654 
1655     case _degenerated_outside_cycle:
1656       // We have degenerated from outside the cycle, which means something is bad with
1657       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1658       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1659       // we can do the most aggressive degen cycle, which includes processing references and
1660       // class unloading, unless those features are explicitly disabled.
1661       //
1662       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1663       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1664       set_process_references(heuristics()->can_process_references());
1665       set_unload_classes(heuristics()->can_unload_classes());
1666 
1667       if (is_traversal_mode()) {
1668         // Not possible to degenerate from here, upgrade to Full GC right away.
1669         cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1670         op_degenerated_fail();
1671         return;
1672       }
1673 
1674       op_reset();
1675 
1676       op_init_mark();
1677       if (cancelled_gc()) {
1678         op_degenerated_fail();
1679         return;
1680       }
1681 
1682     case _degenerated_mark:
1683       op_final_mark();
1684       if (cancelled_gc()) {
1685         op_degenerated_fail();
1686         return;
1687       }
1688 
1689       op_cleanup();
1690 
1691     case _degenerated_evac:
1692       // If heuristics thinks we should do the cycle, this flag would be set,
1693       // and we can do evacuation. Otherwise, it would be the shortcut cycle.


1783   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1784 }
1785 
// Degenerated GC turned out futile: record the upgrade in policy statistics
// and run a Full GC instead.
void ShenandoahHeap::op_degenerated_futile() {
  shenandoah_policy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}
1790 
// Finish the concurrent marking phase and drop the MARKING gc-state bit.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(is_concurrent_mark_in_progress(), "How else could we get here?");
  if (!cancelled_gc()) {
    // Marking completed without cancellation: publish the now-complete marking
    // context and clear the forwarded-objects flag.
    // NOTE(review): the previous comment here claimed this branch handled the
    // *cancelled* case, which contradicts the !cancelled_gc() guard above.
    // Presumably on cancellation the flags are deliberately left alone for the
    // degenerated/full GC path to sort out -- confirm against op_degenerated().
    set_has_forwarded_objects(false);
    mark_complete_marking_context();
  }
  set_concurrent_mark_in_progress(false);
}
1801 
1802 void ShenandoahHeap::force_satb_flush_all_threads() {
1803   if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
1804     // No need to flush SATBs
1805     return;
1806   }
1807 
1808   // Do not block if Threads lock is busy. This avoids the potential deadlock
1809   // when this code is called from the periodic task, and something else is
1810   // expecting the periodic task to complete without blocking. On the off-chance
1811   // Threads lock is busy momentarily, try to acquire several times.
1812   for (int t = 0; t < 10; t++) {
1813     if (Threads_lock->try_lock()) {
1814       JavaThread::set_force_satb_flush_all_threads(true);
1815       Threads_lock->unlock();
1816 
1817       // The threads are not "acquiring" their thread-local data, but it does not
1818       // hurt to "release" the updates here anyway.
1819       OrderAccess::fence();
1820       break;
1821     }
1822     os::naked_short_sleep(1);
1823   }
1824 }
1825 
// Set or clear the given bits in the global gc-state word, then publish the
// updated raw value into every JavaThread's thread-local copy. Must happen at
// a safepoint so all threads observe a consistent state.
void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  JavaThread::set_gc_state_all_threads(_gc_state.raw_value());
}
1831 
// Toggle the MARKING gc-state bit, and (de)activate the SATB mark queues on
// all threads accordingly. The second argument to set_active_all_threads is
// the expected previous state (inverse of the new one).
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  set_gc_state_mask(MARKING, in_progress);
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1836 
// Toggle the gc-state bits for traversal GC: traversal marks, evacuates and
// updates references in one phase, so TRAVERSAL, HAS_FORWARDED and UPDATEREFS
// are set/cleared together, along with SATB queue activity.
void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1841 
// Toggle the EVACUATION gc-state bit. Safepoint-only, per the assert.
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state_mask(EVACUATION, in_progress);
}
1846 
1847 void ShenandoahHeap::ref_processing_init() {
1848   MemRegion mr = reserved_region();
1849 
1850   assert(_max_workers > 0, "Sanity");
1851 
1852   _ref_processor =
1853     new ReferenceProcessor(mr,    // span
1854                            ParallelRefProcEnabled,  // MT processing
1855                            _max_workers,            // Degree of MT processing
1856                            true,                    // MT discovery
1857                            _max_workers,            // Degree of MT discovery
1858                            false,                   // Reference discovery is not atomic
1859                            NULL);                   // No closure, should be installed before use
1860 
1861   shenandoah_assert_rp_isalive_not_installed();


// Outer entry for the Init Update Refs pause: accounts the pause into the
// "gross" timing buckets (presumably including time-to-safepoint overhead),
// optionally injects a test allocation failure, and hands the VM operation
// to the VM thread for execution.
void ShenandoahHeap::vmop_entry_init_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op;
  VMThread::execute(&op);
}
2354 
// Outer entry for the Final Update Refs pause: gross timing, optional test
// allocation failure injection, then execute the VM operation on the VM thread.
void ShenandoahHeap::vmop_entry_final_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op;
  VMThread::execute(&op);
}
2364 
// Outer entry for the Init Traversal pause: gross timing, optional test
// allocation failure injection, then execute the VM operation on the VM thread.
void ShenandoahHeap::vmop_entry_init_traversal() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitTraversalGC op;
  VMThread::execute(&op);
}
2374 
// Outer entry for the Final Traversal pause: gross timing, optional test
// allocation failure injection, then execute the VM operation on the VM thread.
void ShenandoahHeap::vmop_entry_final_traversal() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalTraversalGC op;
  VMThread::execute(&op);
}
2384 
// Outer entry for the Full GC pause. Note this accounts into the *full* STW
// collection counters, unlike the concurrent-cycle pauses above.
void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause);
  VMThread::execute(&op);
}
2394 
// Outer entry for a Degenerated GC pause, entered at the given degeneration
// point. Unlike the other vmop entries, no allocation failure is injected
// here -- presumably because degenerated GC already runs in response to a
// failure.
void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);

  VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
  VMThread::execute(&degenerated_gc);
}
2403 
2404 void ShenandoahHeap::entry_init_mark() {


2453   // No workers used in this phase, no setup required
2454 
2455   op_init_updaterefs();
2456 }
2457 
// Inner (inside-safepoint) entry for the Final Update Refs pause: net pause
// timing, GC log line and JFR event, worker count setup, then the actual work.
void ShenandoahHeap::entry_final_updaterefs() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);

  static const char* msg = "Pause Final Update Refs";
  GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
  EventMark em("%s", msg);

  // Scale the worker threads for this phase per policy.
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}
2472 
// Inner (inside-safepoint) entry for the Init Traversal pause: net pause
// timing, GC log line and JFR event, worker count setup, then the actual work.
void ShenandoahHeap::entry_init_traversal() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);

  static const char* msg = "Pause Init Traversal";
  GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
  EventMark em("%s", msg);

  // Scale the worker threads for this phase per policy.
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
                              "init traversal");

  op_init_traversal();
}
2487 
// Inner (inside-safepoint) entry for the Final Traversal pause: net pause
// timing, GC log line and JFR event, worker count setup, then the actual work.
void ShenandoahHeap::entry_final_traversal() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);

  static const char* msg = "Pause Final Traversal";
  GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
  EventMark em("%s", msg);

  // Scale the worker threads for this phase per policy.
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
                              "final traversal");

  op_final_traversal();
}
2502 
// Inner (inside-safepoint) entry for the Full GC pause: net pause timing,
// GC log line and JFR event, worker count setup, then the actual work.
void ShenandoahHeap::entry_full(GCCause::Cause cause) {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);

  static const char* msg = "Pause Full";
  GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
  EventMark em("%s", msg);

  // Scale the worker threads for this phase per policy.
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}
2517 
2518 void ShenandoahHeap::entry_degenerated(int point) {
2519   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2520   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2521 
2522   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;


2608 }
2609 
// Entry for the concurrent precleaning phase. Only runs when precleaning is
// enabled and this cycle processes references; otherwise a no-op.
void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);

    static const char* msg = "Concurrent precleaning";
    GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
    EventMark em("%s", msg);

    // check_workers = false: worker count for precleaning is advisory only.
    ShenandoahWorkerScope scope(workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
                                "concurrent preclean",
                                /* check_workers = */ false);

    try_inject_alloc_failure();
    op_preclean();
  }
}
2627 
// Entry for the concurrent traversal phase: logging/JFR event, concurrent
// collection counters, worker count setup, optional test allocation failure
// injection, then the actual work.
void ShenandoahHeap::entry_traversal() {
  static const char* msg = "Concurrent traversal";
  GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
  EventMark em("%s", msg);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  // Scale the worker threads for this phase per policy.
  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
                              "concurrent traversal");

  try_inject_alloc_failure();
  op_traversal();
}
2642 
// Entry for the concurrent uncommit phase; shrink_before is passed through
// to op_uncommit() -- confirm its threshold semantics there.
void ShenandoahHeap::entry_uncommit(double shrink_before) {
  static const char *msg = "Concurrent uncommit";
  GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);

  op_uncommit(shrink_before);
}
2652 
2653 void ShenandoahHeap::try_inject_alloc_failure() {
2654   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2655     _inject_alloc_failure.set();
2656     os::naked_short_sleep(1);
2657     if (cancelled_gc()) {
2658       log_info(gc)("Allocation failure was successfully injected");
2659     }
2660   }
2661 }
2662 


2751     return "Concurrent marking (update refs) (process weakrefs)";
2752   } else if (update_refs && unload_cls) {
2753     return "Concurrent marking (update refs) (unload classes)";
2754   } else if (proc_refs && unload_cls) {
2755     return "Concurrent marking (process weakrefs) (unload classes)";
2756   } else if (update_refs) {
2757     return "Concurrent marking (update refs)";
2758   } else if (proc_refs) {
2759     return "Concurrent marking (process weakrefs)";
2760   } else if (unload_cls) {
2761     return "Concurrent marking (unload classes)";
2762   } else {
2763     return "Concurrent marking";
2764   }
2765 }
2766 
2767 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2768   switch (point) {
2769     case _degenerated_unset:
2770       return "Pause Degenerated GC (<UNSET>)";
2771     case _degenerated_traversal:
2772       return "Pause Degenerated GC (Traversal)";
2773     case _degenerated_outside_cycle:
2774       return "Pause Degenerated GC (Outside of Cycle)";
2775     case _degenerated_mark:
2776       return "Pause Degenerated GC (Mark)";
2777     case _degenerated_evac:
2778       return "Pause Degenerated GC (Evacuation)";
2779     case _degenerated_updaterefs:
2780       return "Pause Degenerated GC (Update Refs)";
2781     default:
2782       ShouldNotReachHere();
2783       return "ERROR";
2784   }
2785 }
2786 
2787 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2788 #ifdef ASSERT
2789   assert(_liveness_cache != NULL, "sanity");
2790   assert(worker_id < _max_workers, "sanity");
2791   for (uint i = 0; i < num_regions(); i++) {
2792     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
< prev index next >