
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

rev 59271 : 8240870: Shenandoah: merge evac and update phases
Reviewed-by: XXX


 907 
 908   // Start full GC
 909   collect(GCCause::_metadata_GC_clear_soft_refs);
 910 
 911   // Retry allocation
 912   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 913   if (result != NULL) {
 914     return result;
 915   }
 916 
 917   // Expand and retry allocation
 918   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 919   if (result != NULL) {
 920     return result;
 921   }
 922 
 923   // Out of memory
 924   return NULL;
 925 }
 926
 927 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 928 private:
 929   ShenandoahHeap* const _heap;
 930   Thread* const _thread;
 931 public:
 932   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 933     _heap(heap), _thread(Thread::current()) {}
 934 
 935   void do_object(oop p) {
 936     shenandoah_assert_marked(NULL, p);
 937     if (!p->is_forwarded()) {
 938       _heap->evacuate_object(p, _thread);
 939     }
 940   }
 941 };
 942 
 943 class ShenandoahEvacuationTask : public AbstractGangTask {
 944 private:
 945   ShenandoahHeap* const _sh;
 946   ShenandoahCollectionSet* const _cs;
 947   bool _concurrent;
 948 public:
 949   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 950                            ShenandoahCollectionSet* cs,
 951                            bool concurrent) :
 952     AbstractGangTask("Parallel Evacuation Task"),
 953     _sh(sh),
 954     _cs(cs),
 955     _concurrent(concurrent)
 956   {}
 957 
 958   void work(uint worker_id) {
 959     if (_concurrent) {
 960       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 961       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 962       ShenandoahEvacOOMScope oom_evac_scope;
 963       do_work();
 964     } else {
 965       ShenandoahParallelWorkerSession worker_session(worker_id);
 966       ShenandoahEvacOOMScope oom_evac_scope;
 967       do_work();
 968     }
 969   }
 970 
 971 private:
 972   void do_work() {
 973     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 974     ShenandoahHeapRegion* r;
 975     while ((r = _cs->claim_next()) != NULL) {
 976       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
 977       _sh->marked_object_iterate(r, &cl);
 978
 979       if (ShenandoahPacing) {
 980         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 981       }
 982 
 983       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 984         break;
 985       }
 986     }
 987   }
 988 };
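
Aside: the work distribution above hinges on ShenandoahCollectionSet::claim_next() handing each cset region to exactly one worker, so faster workers naturally claim more regions. A minimal standalone sketch of that claiming pattern — std::atomic standing in for HotSpot's Atomic; ClaimableSet and Region are hypothetical stand-ins, not the actual cset internals:

  #include <atomic>
  #include <cstddef>

  struct Region;                        // stand-in for ShenandoahHeapRegion

  struct ClaimableSet {
    Region** _regions;                  // snapshot of collection set regions
    size_t   _count;
    std::atomic<size_t> _cursor{0};

    // Each fetch_add yields a unique index, so no region is claimed twice.
    Region* claim_next() {
      size_t i = _cursor.fetch_add(1, std::memory_order_relaxed);
      return (i < _count) ? _regions[i] : nullptr;
    }
  };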
 989 
 990 void ShenandoahHeap::trash_cset_regions() {
 991   ShenandoahHeapLocker locker(lock());
 992 
 993   ShenandoahCollectionSet* set = collection_set();
 994   ShenandoahHeapRegion* r;
 995   set->clear_current_index();
 996   while ((r = set->next()) != NULL) {
 997     r->make_trash();
 998   }
 999   collection_set()->clear();
1000 }
1001 
1002 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1003   st->print_cr("Heap Regions:");
1004   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1005   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");


1553       prepare_concurrent_unloading();
1554     }
1555 
1556     // If collection set has candidates, start evacuation.
1557     // Otherwise, bypass the rest of the cycle.
1558     if (!collection_set()->is_empty()) {
1559       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1560 
1561       if (ShenandoahVerify) {
1562         verifier()->verify_before_evacuation();
1563       }
1564 
1565       set_evacuation_in_progress(true);
1566       // From here on, we need to update references.
1567       set_has_forwarded_objects(true);
1568 
1569       if (!is_degenerated_gc_in_progress()) {
1570         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1571           ShenandoahCodeRoots::arm_nmethods();
1572         }
1573         evacuate_and_update_roots();
1574       }
1575 
1576       if (ShenandoahPacing) {
1577         pacer()->setup_for_evac();
1578       }
1579 
1580       if (ShenandoahVerify) {
1581         ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
1582         if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1583           types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
1584           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
1585           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots);
1586         }
1587 
1588         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1589           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
1590         }
1591         verifier()->verify_roots_no_forwarded_except(types);
1592         verifier()->verify_during_evacuation();
 1593       }
1594     } else {
1595       if (ShenandoahVerify) {
1596         verifier()->verify_after_concmark();
1597       }
1598 
1599       if (VerifyAfterGC) {
1600         Universe::verify();
1601       }
1602     }
1603 
1604   } else {
1605     // If this cycle was updating references, we need to keep the has_forwarded_objects
1606     // flag on, for subsequent phases to deal with it.
1607     concurrent_mark()->cancel();
1608     set_concurrent_mark_in_progress(false);
1609 
1610     if (process_references()) {
1611       // Abandon reference processing right away: pre-cleaning must have failed.
1612       ReferenceProcessor *rp = ref_processor();
1613       rp->disable_discovery();
1614       rp->abandon_partial_discovery();
1615       rp->verify_no_references_recorded();
1616     }
1617   }
1618 }
1619 
1620 void ShenandoahHeap::op_conc_evac() {
1621   ShenandoahEvacuationTask task(this, _collection_set, true);
1622   workers()->run_task(&task);
1623 }
1624 
1625 void ShenandoahHeap::op_stw_evac() {
1626   ShenandoahEvacuationTask task(this, _collection_set, false);
1627   workers()->run_task(&task);
1628 }
1629 
1630 void ShenandoahHeap::op_updaterefs() {
1631   update_heap_references(true);
1632 }
1633 
1634 void ShenandoahHeap::op_cleanup_early() {
1635   free_set()->recycle_trash();
1636 }
1637 
1638 void ShenandoahHeap::op_cleanup_complete() {
1639   free_set()->recycle_trash();
1640 }
1641 
1642 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1643 private:
1644   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1645   ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
1646   ShenandoahConcurrentStringDedupRoots          _dedup_roots;
1647 
1648 public:
1649   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1650     AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
1651     _vm_roots(phase),
1652     _cld_roots(phase) {}
1653 


1695 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
1696   _heap(ShenandoahHeap::heap()),
1697   _mark_context(ShenandoahHeap::heap()->marking_context()),
1698   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
1699   _thread(Thread::current()),
1700   _dead_counter(0) {
1701 }
1702 
1703 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
1704   const oop obj = RawAccess<>::oop_load(p);
1705   if (!CompressedOops::is_null(obj)) {
1706     if (!_mark_context->is_marked(obj)) {
1707       shenandoah_assert_correct(p, obj);
1708       oop old = Atomic::cmpxchg(p, obj, oop(NULL));
1709       if (obj == old) {
 1710         _dead_counter++;
1711       }
1712     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
1713       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1714       if (resolved == obj) {
1715         resolved = _heap->evacuate_object(obj, _thread);
1716       }
1717       Atomic::cmpxchg(p, obj, resolved);
 1718       assert(_heap->cancelled_gc() ||
 1719              (_mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved)),
 1720              "Sanity");
1721     }
1722   }
1723 }
1724 
1725 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
1726   ShouldNotReachHere();
1727 }
1728 
1729 size_t ShenandoahEvacUpdateCleanupOopStorageRootsClosure::dead_counter() const {
1730   return _dead_counter;
1731 }
1732 
1733 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::reset_dead_counter() {
1734   _dead_counter = 0;
1735 }
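
The do_oop() CAS above follows a common clear-dead-roots pattern: only the thread whose compare-and-swap actually nulls the slot counts the root as dead, so racing updates to the same slot are never double-counted. A hedged standalone model of just that step — std::atomic in place of Atomic::cmpxchg; Root is a hypothetical type:

  #include <atomic>
  #include <cstddef>

  struct Root { std::atomic<void*> slot{nullptr}; };

  // Returns 1 only if this call transitioned the slot from 'obj' to NULL;
  // a losing CAS means another thread already changed the slot.
  static size_t clear_if_dead(Root& r, void* obj, bool marked) {
    if (obj == nullptr || marked) return 0;
    void* expected = obj;
    return r.slot.compare_exchange_strong(expected, nullptr) ? 1 : 0;
  }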


1944         op_degenerated_fail();
1945         return;
1946       }
1947 
1948     case _degenerated_mark:
1949       op_final_mark();
1950       if (cancelled_gc()) {
1951         op_degenerated_fail();
1952         return;
1953       }
1954 
1955       if (!has_forwarded_objects() && ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
 1956         // Disarm nmethods that were armed for concurrent mark. On a normal cycle,
 1957         // they would be disarmed while the conc-roots phase is running.
1958         // TODO: Call op_conc_roots() here instead
1959         ShenandoahCodeRoots::disarm_nmethods();
1960       }
1961 
1962       op_cleanup_early();
1963 
1964     case _degenerated_evac:
 1965       // If the heuristics decided we should do the cycle, this flag is set,
 1966       // and we can do evacuation. Otherwise, it is the shortcut cycle.
1967       if (is_evacuation_in_progress()) {
 1968
1969         // Degeneration under oom-evac protocol might have left some objects in
1970         // collection set un-evacuated. Restart evacuation from the beginning to
1971         // capture all objects. For all the objects that are already evacuated,
1972         // it would be a simple check, which is supposed to be fast. This is also
1973         // safe to do even without degeneration, as CSet iterator is at beginning
1974         // in preparation for evacuation anyway.
1975         //
1976         // Before doing that, we need to make sure we never had any cset-pinned
1977         // regions. This may happen if allocation failure happened when evacuating
1978         // the about-to-be-pinned object, oom-evac protocol left the object in
1979         // the collection set, and then the pin reached the cset region. If we continue
1980         // the cycle here, we would trash the cset and alive objects in it. To avoid
1981         // it, we fail degeneration right away and slide into Full GC to recover.
1982 
1983         {
1984           sync_pinned_region_status();
1985           collection_set()->clear_current_index();
1986 
1987           ShenandoahHeapRegion* r;
1988           while ((r = collection_set()->next()) != NULL) {
1989             if (r->is_pinned()) {
1990               cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1991               op_degenerated_fail();
1992               return;
1993             }
1994           }
1995 
1996           collection_set()->clear_current_index();
1997         }
1998 
1999         op_stw_evac();
2000         if (cancelled_gc()) {
2001           op_degenerated_fail();
2002           return;
2003         }
2004       }
2005 
 2006       // If the heuristics decided we should do the cycle, this flag is set,
 2007       // and we need to do update-refs. Otherwise, it is the shortcut cycle.
2008       if (has_forwarded_objects()) {
2009         op_init_updaterefs();
2010         if (cancelled_gc()) {
2011           op_degenerated_fail();
2012           return;
2013         }
2014       }
2015 
2016     case _degenerated_updaterefs:
2017       if (has_forwarded_objects()) {
2018         op_final_updaterefs();
2019         if (cancelled_gc()) {
2020           op_degenerated_fail();
2021           return;
2022         }
2023       }
2024 
2025       op_cleanup_complete();
2026       break;
2027 
2028     default:
2029       ShouldNotReachHere();
2030   }
2031 
2032   if (ShenandoahVerify) {
2033     verifier()->verify_after_degenerated();
2034   }
2035 
2036   if (VerifyAfterGC) {
2037     Universe::verify();
2038   }


2420       // Use ParallelGCThreads inside safepoints
2421       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2422     }
2423   } else {
2424     if (UseDynamicNumberOfGCThreads) {
2425       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2426     } else {
2427       // Use ConcGCThreads outside safepoints
2428       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2429     }
2430   }
2431 }
2432 #endif
2433 
2434 ShenandoahVerifier* ShenandoahHeap::verifier() {
2435   guarantee(ShenandoahVerify, "Should be enabled");
2436   assert (_verifier != NULL, "sanity");
2437   return _verifier;
2438 }
2439 
2440 template<class T>
2441 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2442 private:
2443   T cl;
2444   ShenandoahHeap* _heap;
2445   ShenandoahRegionIterator* _regions;
2446   bool _concurrent;
2447 public:
2448   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2449     AbstractGangTask("Concurrent Update References Task"),
2450     cl(T()),
2451     _heap(ShenandoahHeap::heap()),
2452     _regions(regions),
2453     _concurrent(concurrent) {
2454   }
2455 
2456   void work(uint worker_id) {
2457     if (_concurrent) {
2458       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2459       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2460       do_work();
2461     } else {
2462       ShenandoahParallelWorkerSession worker_session(worker_id);
2463       do_work();
2464     }
2465   }
2466 
2467 private:
2468   void do_work() {
2469     ShenandoahHeapRegion* r = _regions->next();
2470     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2471     while (r != NULL) {
2472       HeapWord* update_watermark = r->get_update_watermark();
2473       assert (update_watermark >= r->bottom(), "sanity");
2474       if (r->is_active() && !r->is_cset()) {
2475         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2476       }
2477       if (ShenandoahPacing) {
2478         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2479       }
2480       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2481         return;
2482       }
2483       r = _regions->next();
2484     }
2485   }
2486 };
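
For orientation, the closure plugged in as T (ShenandoahUpdateHeapRefsClosure) conceptually reduces each visited slot to load / resolve forwardee / store back. A minimal sketch under hypothetical standalone types — not the actual closure, which also handles narrow oops and barrier details:

  struct Obj { Obj* fwd; };             // points at the to-space copy, or self

  static inline void update_slot(Obj** slot) {
    Obj* o = *slot;
    if (o != nullptr && o->fwd != nullptr && o->fwd != o) {
      *slot = o->fwd;                   // heal the reference
    }
  }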
2487 
2488 void ShenandoahHeap::update_heap_references(bool concurrent) {
2489   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2490   workers()->run_task(&task);
2491 }
2492 
2493 void ShenandoahHeap::op_init_updaterefs() {
2494   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2495 
2496   set_evacuation_in_progress(false);
2497 
2498   {
2499     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_retire_gclabs);
2500     retire_and_reset_gclabs();
2501   }
2502 
2503   if (ShenandoahVerify) {
2504     if (!is_degenerated_gc_in_progress()) {
2505       verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2506     }
2507     verifier()->verify_before_updaterefs();
2508   }
2509 
2510   set_update_refs_in_progress(true);
2511 
2512   _update_refs_iterator.reset();
2513 
2514   if (ShenandoahPacing) {
2515     pacer()->setup_for_updaterefs();
2516   }
2517 }
2518 
2519 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2520 private:
2521   ShenandoahHeapLock* const _lock;
2522 
2523 public:
2524   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2525 
2526   void heap_region_do(ShenandoahHeapRegion* r) {
 2527     // Drop the unnecessary "pinned" state from regions that do not have CP marks
 2528     // anymore, as this allows trashing them.
2529 
2530     if (r->is_active()) {
2531       if (r->is_pinned()) {
2532         if (r->pin_count() == 0) {
2533           ShenandoahHeapLocker locker(_lock);
2534           r->make_unpinned();
2535         }
2536       } else {
2537         if (r->pin_count() > 0) {
2538           ShenandoahHeapLocker locker(_lock);
2539           r->make_pinned();
2540         }
2541       }
2542     }
2543   }
2544 
2545   bool is_thread_safe() { return true; }
2546 };
2547 
2548 void ShenandoahHeap::op_final_updaterefs() {
2549   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 2550
2551   finish_concurrent_unloading();
2552 
2553   // Check if there is left-over work, and finish it
2554   if (_update_refs_iterator.has_next()) {
2555     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
2556 
2557     // Finish updating references where we left off.
2558     clear_cancelled_gc();
2559     update_heap_references(false);
2560   }
2561 
 2562   // Clear cancelled GC, if set. On the cancellation path, the block above handles
 2563   // everything. On degenerated paths, cancelled GC would not be set anyway.
2564   if (cancelled_gc()) {
2565     clear_cancelled_gc();
2566   }
2567   assert(!cancelled_gc(), "Should have been done right before");
2568 
2569   if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
2570     verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2571   }
2572 
2573   if (is_degenerated_gc_in_progress()) {
2574     concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2575   } else {
2576     concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2577   }
2578 
 2579   // Has to be done before the cset is cleared
2580   if (ShenandoahVerify) {
2581     verifier()->verify_roots_in_to_space();
2582   }
2583 
2584   {
2585     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
2586     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2587     parallel_heap_region_iterate(&cl);
2588 
2589     assert_pinned_region_status();
2590   }
2591 
2592   {
2593     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
2594     trash_cset_regions();
2595   }
2596 
2597   set_has_forwarded_objects(false);
2598   set_update_refs_in_progress(false);
2599 
2600   if (ShenandoahVerify) {
2601     verifier()->verify_after_updaterefs();
2602   }
2603 
2604   if (VerifyAfterGC) {
2605     Universe::verify();
2606   }
2607 
2608   {
2609     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
2610     ShenandoahHeapLocker locker(lock());
2611     _free_set->rebuild();
2612   }
2613 }
2614 
2615 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2616   print_on(st);
2617   print_heap_regions_on(st);
2618 }
2619 
2620 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2621   size_t slice = r->index() / _bitmap_regions_per_slice;
2622 
2623   size_t regions_from = _bitmap_regions_per_slice * slice;
2624   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2625   for (size_t g = regions_from; g < regions_to; g++) {
2626     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2627     if (skip_self && g == r->index()) continue;
2628     if (get_region(g)->is_committed()) {
2629       return true;


2693 }
2694 
2695 void ShenandoahHeap::vmop_entry_init_mark() {
2696   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2697   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
2698 
2699   try_inject_alloc_failure();
2700   VM_ShenandoahInitMark op;
2701   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2702 }
2703 
2704 void ShenandoahHeap::vmop_entry_final_mark() {
2705   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2706   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
2707 
2708   try_inject_alloc_failure();
2709   VM_ShenandoahFinalMarkStartEvac op;
2710   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2711 }
2712 
2713 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2714   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2715   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
2716 
2717   try_inject_alloc_failure();
2718   VM_ShenandoahInitUpdateRefs op;
2719   VMThread::execute(&op);
2720 }
2721 
2722 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2723   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2724   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
2725 
2726   try_inject_alloc_failure();
2727   VM_ShenandoahFinalUpdateRefs op;
2728   VMThread::execute(&op);
2729 }
2730 
2731 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2732   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2733   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
2734 
2735   try_inject_alloc_failure();
2736   VM_ShenandoahFullGC op(cause);
2737   VMThread::execute(&op);
2738 }
2739 
2740 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2741   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2742   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
2743 
2744   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2745   VMThread::execute(&degenerated_gc);
2746 }
2747 


2752 
2753   ShenandoahWorkerScope scope(workers(),
2754                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2755                               "init marking");
2756 
2757   op_init_mark();
2758 }
2759 
2760 void ShenandoahHeap::entry_final_mark() {
2761   const char* msg = final_mark_event_message();
2762   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
2763   EventMark em("%s", msg);
2764 
2765   ShenandoahWorkerScope scope(workers(),
2766                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2767                               "final marking");
2768 
2769   op_final_mark();
2770 }
2771 
2772 void ShenandoahHeap::entry_init_updaterefs() {
2773   static const char* msg = "Pause Init Update Refs";
2774   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
2775   EventMark em("%s", msg);
2776 
2777   // No workers used in this phase, no setup required
2778 
2779   op_init_updaterefs();
2780 }
2781 
2782 void ShenandoahHeap::entry_final_updaterefs() {
2783   static const char* msg = "Pause Final Update Refs";
2784   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
2785   EventMark em("%s", msg);
2786 
2787   ShenandoahWorkerScope scope(workers(),
2788                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2789                               "final reference update");
2790 
2791   op_final_updaterefs();
2792 }
2793 
2794 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2795   static const char* msg = "Pause Full";
2796   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
2797   EventMark em("%s", msg);
2798 
2799   ShenandoahWorkerScope scope(workers(),
2800                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2801                               "full gc");
2802 
2803   op_full(cause);
2804 }
2805 
2806 void ShenandoahHeap::entry_degenerated(int point) {
2807   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2808   const char* msg = degen_event_message(dpoint);
2809   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
2810   EventMark em("%s", msg);
2811 


2816   set_degenerated_gc_in_progress(true);
2817   op_degenerated(dpoint);
2818   set_degenerated_gc_in_progress(false);
2819 }
2820 
2821 void ShenandoahHeap::entry_mark() {
2822   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2823 
2824   const char* msg = conc_mark_event_message();
2825   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
2826   EventMark em("%s", msg);
2827 
2828   ShenandoahWorkerScope scope(workers(),
2829                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2830                               "concurrent marking");
2831 
2832   try_inject_alloc_failure();
2833   op_mark();
2834 }
2835 
2836 void ShenandoahHeap::entry_evac() {
2837   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2838 
2839   static const char* msg = "Concurrent evacuation";
2840   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
2841   EventMark em("%s", msg);
2842 
2843   ShenandoahWorkerScope scope(workers(),
2844                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2845                               "concurrent evacuation");
2846 
2847   try_inject_alloc_failure();
2848   op_conc_evac();
2849 }
2850 
2851 void ShenandoahHeap::entry_updaterefs() {
2852   static const char* msg = "Concurrent update references";
2853   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
2854   EventMark em("%s", msg);
2855 
2856   ShenandoahWorkerScope scope(workers(),
2857                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2858                               "concurrent reference update");
2859 
2860   try_inject_alloc_failure();
2861   op_updaterefs();
2862 }
2863 
2864 void ShenandoahHeap::entry_weak_roots() {
2865   static const char* msg = "Concurrent weak roots";
2866   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
2867   EventMark em("%s", msg);
2868 
2869   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots);
2870 
2871   ShenandoahWorkerScope scope(workers(),
2872                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
2873                               "concurrent weak root");
2874 
2875   try_inject_alloc_failure();
2876   op_weak_roots();
2877 }
2878 
2879 void ShenandoahHeap::entry_class_unloading() {
2880   static const char* msg = "Concurrent class unloading";
2881   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);


3078 
3079   if (proc_refs && unload_cls) {
3080     return "Concurrent marking (process weakrefs) (unload classes)";
3081   } else if (proc_refs) {
3082     return "Concurrent marking (process weakrefs)";
3083   } else if (unload_cls) {
3084     return "Concurrent marking (unload classes)";
3085   } else {
3086     return "Concurrent marking";
3087   }
3088 }
3089 
3090 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
3091   switch (point) {
3092     case _degenerated_unset:
3093       return "Pause Degenerated GC (<UNSET>)";
3094     case _degenerated_outside_cycle:
3095       return "Pause Degenerated GC (Outside of Cycle)";
3096     case _degenerated_mark:
3097       return "Pause Degenerated GC (Mark)";
3098     case _degenerated_evac:
3099       return "Pause Degenerated GC (Evacuation)";
3100     case _degenerated_updaterefs:
3101       return "Pause Degenerated GC (Update Refs)";
3102     default:
3103       ShouldNotReachHere();
3104       return "ERROR";
3105   }
3106 }
3107 
3108 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
3109 #ifdef ASSERT
3110   assert(_liveness_cache != NULL, "sanity");
3111   assert(worker_id < _max_workers, "sanity");
3112   for (uint i = 0; i < num_regions(); i++) {
3113     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
3114   }
3115 #endif
3116   return _liveness_cache[worker_id];
3117 }
3118 
3119 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
3120   assert(worker_id < _max_workers, "sanity");
3121   assert(_liveness_cache != NULL, "sanity");


 907 
 908   // Start full GC
 909   collect(GCCause::_metadata_GC_clear_soft_refs);
 910 
 911   // Retry allocation
 912   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 913   if (result != NULL) {
 914     return result;
 915   }
 916 
 917   // Expand and retry allocation
 918   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 919   if (result != NULL) {
 920     return result;
 921   }
 922 
 923   // Out of memory
 924   return NULL;
 925 }
 926 
 927 typedef Stack<oop, mtGC> ShenandoahEvacUpdateStack;
 928 
 929 class ShenandoahEvacuateContentsOopClosure : public OopIterateClosure {
 930 private:
 931   ShenandoahHeap* const _heap;
 932   ShenandoahEvacUpdateStack* const _stack;
 933 
 934   template<class T>
 935   void do_work(T* p) {
 936     oop fwd = _heap->evac_update_with_forwarded(p);
 937     if (fwd != NULL) {
 938       _stack->push(fwd);
 939     }
 940   }
 941 
 942 public:
 943   ShenandoahEvacuateContentsOopClosure(ShenandoahEvacUpdateStack* stack) : _heap(ShenandoahHeap::heap()), _stack(stack) {}
 944 
 945   void do_oop(oop* p)       { do_work(p); }
 946   void do_oop(narrowOop* p) { do_work(p); }
 947 
 948   virtual bool do_metadata() { return false; }
 949   virtual void do_klass(Klass *k) { }
 950   virtual void do_cld(ClassLoaderData *cld) { }
 951 };
 952 
 953 template<class T>
 954 oop ShenandoahHeap::evacuate_object_recursively(oop obj, T* load_addr, Thread* thread) {
 955   assert(load_addr != NULL || !is_in(load_addr), "Should know the on-heap address.");
 956 
 957   oop fwd = obj;
 958   if (in_collection_set(obj)) {
 959     fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 960     if (obj == fwd) {
 961       fwd = evacuate_object(obj, thread);
 962     }
 963     // TODO: note, we also filter out-of-heap stores
 964     if (load_addr != NULL && fwd != obj && is_in(load_addr)) {
 965       ShenandoahHeap::cas_oop(fwd, load_addr, obj);
 966     }
 967   }
 968 
 969   ShenandoahEvacUpdateStack stack;
 970 
 971   ShenandoahEvacuateContentsOopClosure cl(&stack);
 972   fwd->oop_iterate(&cl);
 973 
 974   while (!stack.is_empty()) {
 975     oop cur = stack.pop();  // avoid shadowing the 'obj' parameter
 976     cur->oop_iterate(&cl);
 977   }
 978
 979   return fwd;
 980 }
 981 
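The explicit Stack above is what keeps the traversal iterative: evacuating an object queues its forwarded copy, and the drain loop scans queued copies instead of recursing on the native stack, which matters for deep object graphs. A minimal standalone model of this worklist pattern — Obj, its fields vector, and the push-only-on-first-copy contract assumed for evac_update_with_forwarded are all hypothetical stand-ins:

  #include <stack>
  #include <vector>

  struct Obj {
    bool in_cset = false;
    Obj* fwd = nullptr;                 // forwarding pointer once copied
    std::vector<Obj*> fields;           // outgoing references
  };

  // Forward one reference; queue the copy for scanning exactly once.
  static Obj* evac_update(Obj* o, std::stack<Obj*>& wl) {
    if (!o->in_cset) return o;
    if (o->fwd == nullptr) {            // first visit: copy, scan it later
      Obj* c = new Obj(*o);             // from-space original is left behind,
      c->in_cset = false;               // as in a real evacuating GC
      c->fwd = nullptr;
      o->fwd = c;
      wl.push(c);
    }
    return o->fwd;                      // already forwarded: just heal the slot
  }

  static Obj* evacuate_transitively(Obj* root) {
    std::stack<Obj*> wl;
    Obj* fwd = evac_update(root, wl);
    wl.push(fwd);                       // always scan the root's fields too
    while (!wl.empty()) {
      Obj* cur = wl.top(); wl.pop();
      for (Obj*& slot : cur->fields) {
        slot = evac_update(slot, wl);   // update each field in place
      }
    }
    return fwd;
  }
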
 982 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 983 private:
 984   ShenandoahHeap* const _heap;
 985   Thread* const _thread;
 986 public:
 987   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 988     _heap(heap), _thread(Thread::current()) {}
 989 
 990   void do_object(oop p) {
 991     shenandoah_assert_not_in_cset(NULL, p);
 992 
 993     // Fine to pass NULL as the source address here, since the object is never in the collection set
 994     _heap->evacuate_object_recursively(p, (oop*)NULL, _thread);
 995   }
 996 };
 997 
 998 class ShenandoahEvacuationTask : public AbstractGangTask {
 999 private:
1000   ShenandoahHeap* const _sh;
1001   ShenandoahRegionIterator* const _regions;
1002   bool _concurrent;
1003 public:
1004   ShenandoahEvacuationTask(ShenandoahHeap* sh,
1005                            ShenandoahRegionIterator* regions,
1006                            bool concurrent) :
1007     AbstractGangTask("Parallel Evacuation Task"),
1008     _sh(sh),
1009     _regions(regions),
1010     _concurrent(concurrent)
1011   {}
1012 
1013   void work(uint worker_id) {
1014     if (_concurrent) {
1015       ShenandoahConcurrentWorkerSession worker_session(worker_id);
1016       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1017       ShenandoahEvacOOMScope oom_evac_scope;
1018       do_work();
1019     } else {
1020       ShenandoahParallelWorkerSession worker_session(worker_id);
1021       ShenandoahEvacOOMScope oom_evac_scope;
1022       do_work();
1023     }
1024   }
1025 
1026 private:
1027   void do_work() {
1028     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 1029     ShenandoahHeapRegion* r = _regions->next();
 1030     ShenandoahMarkingContext* const ctx = _sh->complete_marking_context();
1031     while (r != NULL) {
1032       HeapWord* update_watermark = r->get_update_watermark();
1033       assert (update_watermark >= r->bottom(), "sanity");
1034 
1035       if (r->is_humongous()) {
1036         if (r->is_humongous_start()) {
1037           HeapWord* bottom = r->bottom();
1038           oop obj = oop(bottom);
1039           if (update_watermark > bottom && ctx->is_marked(obj)) {
1040             cl.do_object(obj);
1041           }
1042         }
1043       } else if (r->is_active() && !r->is_cset()) {
1044         _sh->marked_object_iterate(r, &cl, update_watermark);
1045       }
1046       r->set_update_watermark(r->bottom());
1047       if (ShenandoahPacing) {
1048         _sh->pacer()->report_evac_update(pointer_delta(update_watermark, r->bottom()));
 1049       }
1050       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1051         break;
1052       }
1053       r = _regions->next();
1054     }
1055   }
1056 };
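
Two details in do_work() above deserve a note: the task now walks all regions rather than claiming cset regions, because evac-update must heal every reference below each region's update watermark; and resetting the watermark to bottom() marks a region as fully processed, so a restarted pass (e.g. after degeneration) skips it cheaply. A small hedged helper showing the watermark arithmetic that report_evac_update() is charged with — Region and kHeapWordSize are standalone stand-ins:

  #include <cstddef>

  struct Region {
    char* bottom;
    char* update_watermark;             // slots below this may still need healing
  };

  static const size_t kHeapWordSize = sizeof(void*);

  // Remaining work in heap words, the pointer_delta() analogue above.
  static size_t words_to_process(const Region& r) {
    return static_cast<size_t>(r.update_watermark - r.bottom) / kHeapWordSize;
  }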
1057 
1058 void ShenandoahHeap::trash_cset_regions() {
1059   ShenandoahHeapLocker locker(lock());
1060 
1061   ShenandoahCollectionSet* set = collection_set();
1062   ShenandoahHeapRegion* r;
1063   set->clear_current_index();
1064   while ((r = set->next()) != NULL) {
1065     r->make_trash();
1066   }
1067   collection_set()->clear();
1068 }
1069 
1070 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1071   st->print_cr("Heap Regions:");
1072   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1073   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");


1621       prepare_concurrent_unloading();
1622     }
1623 
1624     // If collection set has candidates, start evacuation.
1625     // Otherwise, bypass the rest of the cycle.
1626     if (!collection_set()->is_empty()) {
1627       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1628 
1629       if (ShenandoahVerify) {
1630         verifier()->verify_before_evacuation();
1631       }
1632 
1633       set_evacuation_in_progress(true);
1634       // From here on, we need to update references.
1635       set_has_forwarded_objects(true);
1636 
1637       if (!is_degenerated_gc_in_progress()) {
1638         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1639           ShenandoahCodeRoots::arm_nmethods();
 1640         }
1641       }
1642 
 1643       // Need to evacuate and update all roots, because the evac-update pass
 1644       // walks heap regions only and would not visit the root slots otherwise.
1645       evacuate_and_update_roots();
1646 
1647       if (ShenandoahVerify) {
1648         ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
1649         if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1650           types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
1651           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
1652           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots);
1653         }
1654 
1655         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
1656           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
1657         }
1658         verifier()->verify_roots_no_forwarded_except(types);
1659         verifier()->verify_during_evacuation();
1660       }
1661 
1662       if (ShenandoahPacing) {
1663         pacer()->setup_for_evac_update(); // TODO: Really?
1664       }
1665 
1666     } else {
1667       if (ShenandoahVerify) {
1668         verifier()->verify_after_concmark();
1669       }
1670 
1671       if (VerifyAfterGC) {
1672         Universe::verify();
1673       }
1674     }
1675 
1676   } else {
1677     // If this cycle was updating references, we need to keep the has_forwarded_objects
1678     // flag on, for subsequent phases to deal with it.
1679     concurrent_mark()->cancel();
1680     set_concurrent_mark_in_progress(false);
1681 
1682     if (process_references()) {
1683       // Abandon reference processing right away: pre-cleaning must have failed.
1684       ReferenceProcessor *rp = ref_processor();
1685       rp->disable_discovery();
1686       rp->abandon_partial_discovery();
1687       rp->verify_no_references_recorded();
1688     }
1689   }
1690 }
1691 
1692 void ShenandoahHeap::op_conc_evac_update() {
 1693   ShenandoahEvacuationTask task(this, &_update_refs_iterator, true);
1694   workers()->run_task(&task);
1695 }
1696 
1697 void ShenandoahHeap::op_stw_evac_update() {
 1698   ShenandoahEvacuationTask task(this, &_update_refs_iterator, false);
1699   workers()->run_task(&task);
1700 }
 1701
1702 void ShenandoahHeap::op_cleanup_early() {
1703   free_set()->recycle_trash();
1704 }
1705 
1706 void ShenandoahHeap::op_cleanup_complete() {
1707   free_set()->recycle_trash();
1708 }
1709 
1710 class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
1711 private:
1712   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
1713   ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
1714   ShenandoahConcurrentStringDedupRoots          _dedup_roots;
1715 
1716 public:
1717   ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
1718     AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots Task"),
1719     _vm_roots(phase),
1720     _cld_roots(phase) {}
1721 


1763 ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
1764   _heap(ShenandoahHeap::heap()),
1765   _mark_context(ShenandoahHeap::heap()->marking_context()),
1766   _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
1767   _thread(Thread::current()),
1768   _dead_counter(0) {
1769 }
1770 
1771 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
1772   const oop obj = RawAccess<>::oop_load(p);
1773   if (!CompressedOops::is_null(obj)) {
1774     if (!_mark_context->is_marked(obj)) {
1775       shenandoah_assert_correct(p, obj);
1776       oop old = Atomic::cmpxchg(p, obj, oop(NULL));
1777       if (obj == old) {
 1778         _dead_counter++;
1779       }
1780     } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
1781       oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1782       if (resolved == obj) {
1783         resolved = _heap->evacuate_object_recursively(obj, p, _thread);
1784       }
1785       Atomic::cmpxchg(p, obj, resolved);
 1786       assert(_heap->cancelled_gc() ||
 1787              (_mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved)),
 1788              "Sanity");
1789     }
1790   }
1791 }
1792 
1793 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
1794   ShouldNotReachHere();
1795 }
1796 
1797 size_t ShenandoahEvacUpdateCleanupOopStorageRootsClosure::dead_counter() const {
1798   return _dead_counter;
1799 }
1800 
1801 void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::reset_dead_counter() {
1802   _dead_counter = 0;
1803 }


2012         op_degenerated_fail();
2013         return;
2014       }
2015 
2016     case _degenerated_mark:
2017       op_final_mark();
2018       if (cancelled_gc()) {
2019         op_degenerated_fail();
2020         return;
2021       }
2022 
2023       if (!has_forwarded_objects() && ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
 2024         // Disarm nmethods that were armed for concurrent mark. On a normal cycle,
 2025         // they would be disarmed while the conc-roots phase is running.
2026         // TODO: Call op_conc_roots() here instead
2027         ShenandoahCodeRoots::disarm_nmethods();
2028       }
2029 
2030       op_cleanup_early();
2031 
2032     case _degenerated_evac_update:
 2033       // If the heuristics decided we should do the cycle, this flag is set,
 2034       // and we can do evacuation. Otherwise, it is the shortcut cycle.
2035       if (is_evacuation_in_progress()) {
2036 
2037         if (point == _degenerated_evac_update) {
2038           // TODO: Cannot handle it properly yet, Full GC.
2039           cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
2040           op_degenerated_fail();
2041           return;
2042         }
2043 
2044         // Degeneration under oom-evac protocol might have left some objects in
2045         // collection set un-evacuated. Restart evacuation from the beginning to
2046         // capture all objects. For all the objects that are already evacuated,
2047         // it would be a simple check, which is supposed to be fast. This is also
2048         // safe to do even without degeneration, as CSet iterator is at beginning
2049         // in preparation for evacuation anyway.
2050         //
2051         // Before doing that, we need to make sure we never had any cset-pinned
2052         // regions. This may happen if allocation failure happened when evacuating
2053         // the about-to-be-pinned object, oom-evac protocol left the object in
2054         // the collection set, and then the pin reached the cset region. If we continue
2055         // the cycle here, we would trash the cset and alive objects in it. To avoid
2056         // it, we fail degeneration right away and slide into Full GC to recover.
2057 
2058         {
2059           sync_pinned_region_status();
2060           collection_set()->clear_current_index();
2061 
2062           ShenandoahHeapRegion* r;
2063           while ((r = collection_set()->next()) != NULL) {
2064             if (r->is_pinned()) {
2065               cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
2066               op_degenerated_fail();
2067               return;
2068             }
2069           }
2070 
2071           collection_set()->clear_current_index();
2072         }
2073 
 2074         op_init_evac_update();
2075         if (cancelled_gc()) {
2076           op_degenerated_fail();
2077           return;
2078         }
2079       }
 2080
2081       if (has_forwarded_objects()) {
2082         op_final_evac_update();
2083         if (cancelled_gc()) {
2084           op_degenerated_fail();
2085           return;
2086         }
2087       }
2088 
2089       op_cleanup_complete();
2090       break;
2091 
2092     default:
2093       ShouldNotReachHere();
2094   }
2095 
2096   if (ShenandoahVerify) {
2097     verifier()->verify_after_degenerated();
2098   }
2099 
2100   if (VerifyAfterGC) {
2101     Universe::verify();
2102   }


2484       // Use ParallelGCThreads inside safepoints
2485       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2486     }
2487   } else {
2488     if (UseDynamicNumberOfGCThreads) {
2489       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2490     } else {
2491       // Use ConcGCThreads outside safepoints
2492       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2493     }
2494   }
2495 }
2496 #endif
2497 
2498 ShenandoahVerifier* ShenandoahHeap::verifier() {
2499   guarantee(ShenandoahVerify, "Should be enabled");
2500   assert (_verifier != NULL, "sanity");
2501   return _verifier;
2502 }
2503 
 2504 void ShenandoahHeap::op_init_evac_update() {
2505   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 2506
2507   {
2508     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_evac_update_retire_gclabs);
2509     retire_and_reset_gclabs();
2510   }
2511 
2512   if (ShenandoahVerify) {
2513     if (!is_degenerated_gc_in_progress() && !cancelled_gc()) {
2514       verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2515     }
2516     verifier()->verify_before_evac_update();
2517   }
2518 
2519   set_update_refs_in_progress(true);
2520 
2521   _update_refs_iterator.reset();
2522 
2523   if (ShenandoahPacing) {
2524     pacer()->setup_for_evac_update();
2525   }
2526 }
2527 
2528 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2529 private:
2530   ShenandoahHeapLock* const _lock;
2531 
2532 public:
2533   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2534 
2535   void heap_region_do(ShenandoahHeapRegion* r) {
 2536     // Drop the unnecessary "pinned" state from regions that do not have CP marks
 2537     // anymore, as this allows trashing them.
2538 
2539     if (r->is_active()) {
2540       if (r->is_pinned()) {
2541         if (r->pin_count() == 0) {
2542           ShenandoahHeapLocker locker(_lock);
2543           r->make_unpinned();
2544         }
2545       } else {
2546         if (r->pin_count() > 0) {
2547           ShenandoahHeapLocker locker(_lock);
2548           r->make_pinned();
2549         }
2550       }
2551     }
2552   }
2553 
2554   bool is_thread_safe() { return true; }
2555 };
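
The closure above reconciles two views of pinning: the region's coarse pinned/unpinned state and its raw pin count (e.g. from JNI critical sections). Only the two inconsistent combinations take the heap lock and transition state; a hypothetical standalone reduction of heap_region_do():

  enum class PinState { Unpinned, Pinned };

  static PinState reconcile_pin_state(PinState current, size_t pin_count) {
    if (current == PinState::Pinned && pin_count == 0) {
      return PinState::Unpinned;        // stale pin: CP marks are gone
    }
    if (current == PinState::Unpinned && pin_count > 0) {
      return PinState::Pinned;          // pin arrived since the last sync
    }
    return current;                     // already consistent, no lock needed
  }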
2556 
2557 void ShenandoahHeap::op_final_evac_update() {
2558   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2559 
2560   set_evacuation_in_progress(false);
2561 
2562   finish_concurrent_unloading();
2563 
2564   // Check if there is left-over work, and finish it
2565   if (_update_refs_iterator.has_next()) {
2566     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_update_finish_work);
2567 
2568     // Finish updating references where we left off.
2569     clear_cancelled_gc();
2570     op_stw_evac_update();
2571   }
2572 
 2573   // Clear cancelled GC, if set. On the cancellation path, the block above handles
 2574   // everything. On degenerated paths, cancelled GC would not be set anyway.
2575   if (cancelled_gc()) {
2576     clear_cancelled_gc();
2577   }
2578   assert(!cancelled_gc(), "Should have been done right before");
2579 
2580   if (ShenandoahVerify && !is_degenerated_gc_in_progress() && !cancelled_gc()) {
2581     verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
2582   }
2583 
2584   if (is_degenerated_gc_in_progress()) {
2585     concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
2586   } else {
2587     concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_evac_update_roots);
2588   }
2589 
 2590   // Has to be done before the cset is cleared
2591   if (ShenandoahVerify) {
2592     verifier()->verify_roots_in_to_space();
2593   }
2594 
2595   {
2596     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_update_update_region_states);
2597     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2598     parallel_heap_region_iterate(&cl);
2599 
2600     assert_pinned_region_status();
2601   }
2602 
2603   {
2604     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_update_trash_cset);
2605     trash_cset_regions();
2606   }
2607 
2608   set_has_forwarded_objects(false);
2609   set_update_refs_in_progress(false);
2610 
2611   if (ShenandoahVerify) {
2612     verifier()->verify_after_evac_update();
2613   }
2614 
2615   if (VerifyAfterGC) {
2616     Universe::verify();
2617   }
2618 
2619   {
2620     ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_update_rebuild_freeset);
2621     ShenandoahHeapLocker locker(lock());
2622     _free_set->rebuild();
2623   }
2624 }
2625 
2626 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2627   print_on(st);
2628   print_heap_regions_on(st);
2629 }
2630 
2631 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2632   size_t slice = r->index() / _bitmap_regions_per_slice;
2633 
2634   size_t regions_from = _bitmap_regions_per_slice * slice;
2635   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2636   for (size_t g = regions_from; g < regions_to; g++) {
2637     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2638     if (skip_self && g == r->index()) continue;
2639     if (get_region(g)->is_committed()) {
2640       return true;


2704 }
2705 
2706 void ShenandoahHeap::vmop_entry_init_mark() {
2707   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2708   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
2709 
2710   try_inject_alloc_failure();
2711   VM_ShenandoahInitMark op;
2712   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2713 }
2714 
2715 void ShenandoahHeap::vmop_entry_final_mark() {
2716   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2717   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
2718 
2719   try_inject_alloc_failure();
2720   VM_ShenandoahFinalMarkStartEvac op;
2721   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2722 }
2723 
2724 void ShenandoahHeap::vmop_entry_init_evac_update() {
2725   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2726   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_evac_update_gross);
2727 
2728   try_inject_alloc_failure();
2729   VM_ShenandoahInitEvacUpdate op;
2730   VMThread::execute(&op);
2731 }
2732 
2733 void ShenandoahHeap::vmop_entry_final_evac_update() {
2734   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2735   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_evac_update_gross);
2736 
2737   try_inject_alloc_failure();
2738   VM_ShenandoahFinalEvacUpdate op;
2739   VMThread::execute(&op);
2740 }
2741 
2742 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2743   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2744   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
2745 
2746   try_inject_alloc_failure();
2747   VM_ShenandoahFullGC op(cause);
2748   VMThread::execute(&op);
2749 }
2750 
2751 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2752   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2753   ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
2754 
2755   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2756   VMThread::execute(&degenerated_gc);
2757 }
2758 


2763 
2764   ShenandoahWorkerScope scope(workers(),
2765                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2766                               "init marking");
2767 
2768   op_init_mark();
2769 }
2770 
2771 void ShenandoahHeap::entry_final_mark() {
2772   const char* msg = final_mark_event_message();
2773   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
2774   EventMark em("%s", msg);
2775 
2776   ShenandoahWorkerScope scope(workers(),
2777                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2778                               "final marking");
2779 
2780   op_final_mark();
2781 }
2782 
2783 void ShenandoahHeap::entry_init_evac_update() {
2784   static const char* msg = "Pause Init Evac-Update";
2785   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_evac_update);
2786   EventMark em("%s", msg);
2787 
2788   // No workers used in this phase, no setup required
2789 
2790   op_init_evac_update();
2791 }
2792 
2793 void ShenandoahHeap::entry_final_evac_update() {
2794   static const char* msg = "Pause Final Evac-Update";
2795   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_evac_update);
2796   EventMark em("%s", msg);
2797 
2798   ShenandoahWorkerScope scope(workers(),
2799                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2800                               "final evac-update");
2801 
2802   op_final_evac_update();
2803 }
2804 
2805 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2806   static const char* msg = "Pause Full";
2807   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
2808   EventMark em("%s", msg);
2809 
2810   ShenandoahWorkerScope scope(workers(),
2811                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2812                               "full gc");
2813 
2814   op_full(cause);
2815 }
2816 
2817 void ShenandoahHeap::entry_degenerated(int point) {
2818   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2819   const char* msg = degen_event_message(dpoint);
2820   ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
2821   EventMark em("%s", msg);
2822 


2827   set_degenerated_gc_in_progress(true);
2828   op_degenerated(dpoint);
2829   set_degenerated_gc_in_progress(false);
2830 }
2831 
2832 void ShenandoahHeap::entry_mark() {
2833   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2834 
2835   const char* msg = conc_mark_event_message();
2836   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
2837   EventMark em("%s", msg);
2838 
2839   ShenandoahWorkerScope scope(workers(),
2840                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2841                               "concurrent marking");
2842 
2843   try_inject_alloc_failure();
2844   op_mark();
2845 }
2846 
2847 void ShenandoahHeap::entry_evac_update() {
2848   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2849 
2850   static const char* msg = "Concurrent evac-update";
 2851   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac_update);
2852   EventMark em("%s", msg);
2853 
2854   ShenandoahWorkerScope scope(workers(),
2855                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac_update(),
2856                               "concurrent evacuation/update");
2857 
2858   try_inject_alloc_failure();
2859   op_conc_evac_update();
2860 }
2861 
2862 void ShenandoahHeap::entry_weak_roots() {
2863   static const char* msg = "Concurrent weak roots";
2864   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
2865   EventMark em("%s", msg);
2866 
2867   ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots);
2868 
2869   ShenandoahWorkerScope scope(workers(),
2870                               ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
2871                               "concurrent weak root");
2872 
2873   try_inject_alloc_failure();
2874   op_weak_roots();
2875 }
2876 
2877 void ShenandoahHeap::entry_class_unloading() {
2878   static const char* msg = "Concurrent class unloading";
2879   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);


3076 
3077   if (proc_refs && unload_cls) {
3078     return "Concurrent marking (process weakrefs) (unload classes)";
3079   } else if (proc_refs) {
3080     return "Concurrent marking (process weakrefs)";
3081   } else if (unload_cls) {
3082     return "Concurrent marking (unload classes)";
3083   } else {
3084     return "Concurrent marking";
3085   }
3086 }
3087 
3088 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
3089   switch (point) {
3090     case _degenerated_unset:
3091       return "Pause Degenerated GC (<UNSET>)";
3092     case _degenerated_outside_cycle:
3093       return "Pause Degenerated GC (Outside of Cycle)";
3094     case _degenerated_mark:
3095       return "Pause Degenerated GC (Mark)";
3096     case _degenerated_evac_update:
 3097       return "Pause Degenerated GC (Evac Update)";
3098     default:
3099       ShouldNotReachHere();
3100       return "ERROR";
3101   }
3102 }
3103 
3104 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
3105 #ifdef ASSERT
3106   assert(_liveness_cache != NULL, "sanity");
3107   assert(worker_id < _max_workers, "sanity");
3108   for (uint i = 0; i < num_regions(); i++) {
3109     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
3110   }
3111 #endif
3112   return _liveness_cache[worker_id];
3113 }
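
get_liveness_cache() hands each marking worker a private per-region counter array (asserted zeroed), and flush_liveness_cache() later folds those counts into the shared totals, so marking itself avoids contended atomics. A hedged sketch of that flush step — std::atomic in place of HotSpot's Atomic, and the counter layout is assumed:

  #include <atomic>
  #include <cstddef>
  #include <cstdint>

  // Fold one worker's private live-data counts into shared per-region totals,
  // zeroing the cache so the next hand-out sees it empty again.
  static void flush_liveness(uint32_t* local_counts,
                             std::atomic<uint64_t>* shared_counts,
                             size_t num_regions) {
    for (size_t i = 0; i < num_regions; i++) {
      if (local_counts[i] != 0) {
        shared_counts[i].fetch_add(local_counts[i], std::memory_order_relaxed);
        local_counts[i] = 0;
      }
    }
  }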
3114 
3115 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
3116   assert(worker_id < _max_workers, "sanity");
3117   assert(_liveness_cache != NULL, "sanity");