21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "memory/allocation.hpp"
26 #include "memory/universe.hpp"
27
28 #include "gc/shared/gcArguments.hpp"
29 #include "gc/shared/gcTimer.hpp"
30 #include "gc/shared/gcTraceTime.inline.hpp"
31 #include "gc/shared/memAllocator.hpp"
32 #include "gc/shared/parallelCleaning.hpp"
33 #include "gc/shared/plab.hpp"
34
35 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
36 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
37 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
39 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
40 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
41 #include "gc/shenandoah/shenandoahControlThread.hpp"
42 #include "gc/shenandoah/shenandoahFreeSet.hpp"
43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
44 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
45 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
46 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
47 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
48 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
49 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
50 #include "gc/shenandoah/shenandoahMetrics.hpp"
51 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
52 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
53 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
54 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
55 #include "gc/shenandoah/shenandoahStringDedup.hpp"
56 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
57 #include "gc/shenandoah/shenandoahUtils.hpp"
58 #include "gc/shenandoah/shenandoahVerifier.hpp"
59 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
60 #include "gc/shenandoah/shenandoahVMOperations.hpp"
932 };
933
934 class ShenandoahEvacuationTask : public AbstractGangTask {
935 private:
936 ShenandoahHeap* const _sh;
937 ShenandoahCollectionSet* const _cs;
938 bool _concurrent;
939 public:
940 ShenandoahEvacuationTask(ShenandoahHeap* sh,
941 ShenandoahCollectionSet* cs,
942 bool concurrent) :
943 AbstractGangTask("Parallel Evacuation Task"),
944 _sh(sh),
945 _cs(cs),
946 _concurrent(concurrent)
947 {}
948
949 void work(uint worker_id) {
950 if (_concurrent) {
951 ShenandoahConcurrentWorkerSession worker_session(worker_id);
952 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
953 ShenandoahEvacOOMScope oom_evac_scope;
954 do_work();
955 } else {
956 ShenandoahParallelWorkerSession worker_session(worker_id);
957 ShenandoahEvacOOMScope oom_evac_scope;
958 do_work();
959 }
960 }
961
962 private:
963 void do_work() {
964 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
965 ShenandoahHeapRegion* r;
966 while ((r =_cs->claim_next()) != NULL) {
967 assert(r->has_live(), "all-garbage regions are reclaimed early");
968 _sh->marked_object_iterate(r, &cl);
969
970 if (ShenandoahPacing) {
971 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
972 }
1054
1055 public:
1056 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1057 AbstractGangTask("Shenandoah evacuate and update roots"),
1058 _rp(rp) {}
1059
1060 void work(uint worker_id) {
1061 ShenandoahParallelWorkerSession worker_session(worker_id);
1062 ShenandoahEvacOOMScope oom_evac_scope;
1063 ShenandoahEvacuateUpdateRootsClosure cl;
1064 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1065 _rp->roots_do(worker_id, &cl);
1066 }
1067 };
1068
// Evacuate objects reachable from GC roots and update the root slots to point
// at the new copies. Must run at a Shenandoah safepoint (asserted below):
// roots are only iterated while the world is stopped.
void ShenandoahHeap::evacuate_and_update_roots() {
#if COMPILER2_OR_JVMCI
  // Clear the derived pointer table before roots move; it is recomputed
  // via update_pointers() once root processing is done.
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  {
    // Scope the evacuator and task so they are destructed before
    // DerivedPointerTable::update_pointers() runs below.
    ShenandoahRootEvacuator rp(workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
1085
1086 // Returns size in bytes
1087 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1088 if (ShenandoahElasticTLAB) {
1089 // With Elastic TLABs, return the max allowed size, and let the allocation path
1090 // figure out the safe size for current allocation.
1091 return ShenandoahHeapRegion::max_tlab_size_bytes();
1092 } else {
1093 return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1094 }
1095 }
1096
1500 // If collection set has candidates, start evacuation.
1501 // Otherwise, bypass the rest of the cycle.
1502 if (!collection_set()->is_empty()) {
1503 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1504
1505 if (ShenandoahVerify) {
1506 verifier()->verify_before_evacuation();
1507 }
1508
1509 set_evacuation_in_progress(true);
1510 // From here on, we need to update references.
1511 set_has_forwarded_objects(true);
1512
1513 evacuate_and_update_roots();
1514
1515 if (ShenandoahPacing) {
1516 pacer()->setup_for_evac();
1517 }
1518
1519 if (ShenandoahVerify) {
1520 verifier()->verify_roots_no_forwarded();
1521 verifier()->verify_during_evacuation();
1522 }
1523 } else {
1524 if (ShenandoahVerify) {
1525 verifier()->verify_after_concmark();
1526 }
1527
1528 if (VerifyAfterGC) {
1529 Universe::verify();
1530 }
1531 }
1532
1533 } else {
1534 concurrent_mark()->cancel();
1535 stop_concurrent_marking();
1536
1537 if (process_references()) {
1538 // Abandon reference processing right away: pre-cleaning must have failed.
1539 ReferenceProcessor *rp = ref_processor();
1540 rp->disable_discovery();
1561 }
1562
// Concurrent evacuation operation: drain the collection set with a parallel
// gang task in concurrent mode ('true').
void ShenandoahHeap::op_conc_evac() {
  ShenandoahEvacuationTask task(this, _collection_set, true);
  workers()->run_task(&task);
}
1567
// Stop-the-world evacuation operation: same task as op_conc_evac(), but run
// in non-concurrent mode ('false').
void ShenandoahHeap::op_stw_evac() {
  ShenandoahEvacuationTask task(this, _collection_set, false);
  workers()->run_task(&task);
}
1572
// Update-references operation; 'true' selects the concurrent mode of
// update_heap_references().
void ShenandoahHeap::op_updaterefs() {
  update_heap_references(true);
}
1576
// Cleanup operation: recycle trash regions back into the free set.
void ShenandoahHeap::op_cleanup() {
  free_set()->recycle_trash();
}
1580
// Reset operation: clear the marking bitmap.
void ShenandoahHeap::op_reset() {
  reset_mark_bitmap();
}
1584
// Preclean operation: preclean discovered weak references during
// concurrent mark.
void ShenandoahHeap::op_preclean() {
  concurrent_mark()->preclean_weak_refs();
}
1588
// Traversal GC: initialization step, forwarded to the traversal collector.
void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}
1592
// Traversal GC: concurrent step, forwarded to the traversal collector.
void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}
1596
// Traversal GC: final step, forwarded to the traversal collector.
void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}
1600
1834 false, // Reference discovery is not atomic
1835 NULL, // No closure, should be installed before use
1836 true); // Scale worker threads
1837
1838 shenandoah_assert_rp_isalive_not_installed();
1839 }
1840
// Returns the GC tracer, owned by the collector policy.
GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}
1844
// Reports TLAB usage from the free set. Per-thread accounting is not
// maintained here, so the 'thread' argument is intentionally unused.
size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}
1848
1849 bool ShenandoahHeap::try_cancel_gc() {
1850 while (true) {
1851 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1852 if (prev == CANCELLABLE) return true;
1853 else if (prev == CANCELLED) return false;
1854 assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
1855 assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1856 {
1857 // We need to provide a safepoint here, otherwise we might
1858 // spin forever if a SP is pending.
1859 ThreadBlockInVM sp(JavaThread::current());
1860 SpinPause();
1861 }
1862 }
1863 }
1864
1865 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1866 if (try_cancel_gc()) {
1867 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1868 log_info(gc)("%s", msg.buffer());
1869 Events::log(Thread::current(), "%s", msg.buffer());
1870 }
1871 }
1872
1873 uint ShenandoahHeap::max_workers() {
1874 return _max_workers;
2048
2049 template<class T>
2050 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2051 private:
2052 T cl;
2053 ShenandoahHeap* _heap;
2054 ShenandoahRegionIterator* _regions;
2055 bool _concurrent;
2056 public:
2057 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2058 AbstractGangTask("Concurrent Update References Task"),
2059 cl(T()),
2060 _heap(ShenandoahHeap::heap()),
2061 _regions(regions),
2062 _concurrent(concurrent) {
2063 }
2064
2065 void work(uint worker_id) {
2066 if (_concurrent) {
2067 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2068 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2069 do_work();
2070 } else {
2071 ShenandoahParallelWorkerSession worker_session(worker_id);
2072 do_work();
2073 }
2074 }
2075
2076 private:
2077 void do_work() {
2078 ShenandoahHeapRegion* r = _regions->next();
2079 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2080 while (r != NULL) {
2081 HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2082 assert (top_at_start_ur >= r->bottom(), "sanity");
2083 if (r->is_active() && !r->is_cset()) {
2084 _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
2085 }
2086 if (ShenandoahPacing) {
2087 _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2088 }
2248 return true;
2249 }
2250
2251 if (is_bitmap_slice_committed(r, true)) {
2252 // Some other region from the group is still committed, meaning the bitmap
2253   // slice should stay committed, exit right away.
2254 return true;
2255 }
2256
2257 // Uncommit the bitmap slice:
2258 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2259 size_t off = _bitmap_bytes_per_slice * slice;
2260 size_t len = _bitmap_bytes_per_slice;
2261 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2262 return false;
2263 }
2264 return true;
2265 }
2266
2267 void ShenandoahHeap::safepoint_synchronize_begin() {
2268 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2269 SuspendibleThreadSet::synchronize();
2270 }
2271 }
2272
2273 void ShenandoahHeap::safepoint_synchronize_end() {
2274 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2275 SuspendibleThreadSet::desynchronize();
2276 }
2277 }
2278
// Pause entry for init-mark: sets up STW collection counters and
// gross-pause timing phases (RAII), then executes the VM operation.
void ShenandoahHeap::vmop_entry_init_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);

  // Test hook: possibly simulate an allocation failure before the pause.
  try_inject_alloc_failure();
  VM_ShenandoahInitMark op;
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}
2288
2289 void ShenandoahHeap::vmop_entry_final_mark() {
2290 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2291 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2292 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2293
2294 try_inject_alloc_failure();
2295 VM_ShenandoahFinalMarkStartEvac op;
2296 VMThread::execute(&op); // jump to entry_final_mark under safepoint
2521 "concurrent evacuation");
2522
2523 try_inject_alloc_failure();
2524 op_conc_evac();
2525 }
2526
// Concurrent-phase entry for update-references: sets up phase timing,
// logging/event marking, and scales workers, then runs the operation.
void ShenandoahHeap::entry_updaterefs() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);

  static const char* msg = "Concurrent update references";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  try_inject_alloc_failure();
  op_updaterefs();
}
// Concurrent-phase entry for cleanup: phase timing and logging only;
// the cleanup operation itself runs without a worker scope.
void ShenandoahHeap::entry_cleanup() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);

  static const char* msg = "Concurrent cleanup";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup();
}
2553
2554 void ShenandoahHeap::entry_reset() {
2555 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2556
2557 static const char* msg = "Concurrent reset";
2558 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2559 EventMark em("%s", msg);
2560
|
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "memory/allocation.hpp"
26 #include "memory/universe.hpp"
27
28 #include "gc/shared/gcArguments.hpp"
29 #include "gc/shared/gcTimer.hpp"
30 #include "gc/shared/gcTraceTime.inline.hpp"
31 #include "gc/shared/memAllocator.hpp"
32 #include "gc/shared/parallelCleaning.hpp"
33 #include "gc/shared/plab.hpp"
34
35 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
36 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
37 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
38 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
39 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
40 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
41 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
42 #include "gc/shenandoah/shenandoahControlThread.hpp"
43 #include "gc/shenandoah/shenandoahFreeSet.hpp"
44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
45 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
46 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
47 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
48 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
50 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
51 #include "gc/shenandoah/shenandoahMetrics.hpp"
52 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
53 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
54 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
55 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
56 #include "gc/shenandoah/shenandoahStringDedup.hpp"
57 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
58 #include "gc/shenandoah/shenandoahUtils.hpp"
59 #include "gc/shenandoah/shenandoahVerifier.hpp"
60 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
61 #include "gc/shenandoah/shenandoahVMOperations.hpp"
933 };
934
935 class ShenandoahEvacuationTask : public AbstractGangTask {
936 private:
937 ShenandoahHeap* const _sh;
938 ShenandoahCollectionSet* const _cs;
939 bool _concurrent;
940 public:
941 ShenandoahEvacuationTask(ShenandoahHeap* sh,
942 ShenandoahCollectionSet* cs,
943 bool concurrent) :
944 AbstractGangTask("Parallel Evacuation Task"),
945 _sh(sh),
946 _cs(cs),
947 _concurrent(concurrent)
948 {}
949
950 void work(uint worker_id) {
951 if (_concurrent) {
952 ShenandoahConcurrentWorkerSession worker_session(worker_id);
953 ShenandoahSuspendibleThreadSetJoiner stsj;
954 ShenandoahEvacOOMScope oom_evac_scope;
955 do_work();
956 } else {
957 ShenandoahParallelWorkerSession worker_session(worker_id);
958 ShenandoahEvacOOMScope oom_evac_scope;
959 do_work();
960 }
961 }
962
963 private:
964 void do_work() {
965 ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
966 ShenandoahHeapRegion* r;
967 while ((r =_cs->claim_next()) != NULL) {
968 assert(r->has_live(), "all-garbage regions are reclaimed early");
969 _sh->marked_object_iterate(r, &cl);
970
971 if (ShenandoahPacing) {
972 _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
973 }
1055
1056 public:
1057 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1058 AbstractGangTask("Shenandoah evacuate and update roots"),
1059 _rp(rp) {}
1060
1061 void work(uint worker_id) {
1062 ShenandoahParallelWorkerSession worker_session(worker_id);
1063 ShenandoahEvacOOMScope oom_evac_scope;
1064 ShenandoahEvacuateUpdateRootsClosure cl;
1065 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1066 _rp->roots_do(worker_id, &cl);
1067 }
1068 };
1069
// Evacuate objects reachable from GC roots and update the root slots to point
// at the new copies. Must run at a Shenandoah safepoint (asserted below).
// Roots that can be handled concurrently are skipped here when the current
// mode processes them in the concurrent roots phase instead.
void ShenandoahHeap::evacuate_and_update_roots() {
#if COMPILER2_OR_JVMCI
  // Clear the derived pointer table before roots move; it is recomputed
  // via update_pointers() once root processing is done.
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
  {
    // Include concurrent roots if current cycle can not process those roots concurrently
    bool include_concurrent_roots = !ShenandoahConcurrentRoots::should_do_concurrent_roots();

    ShenandoahRootEvacuator rp(workers()->active_workers(), ShenandoahPhaseTimings::init_evac, include_concurrent_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
1088
1089 // Returns size in bytes
1090 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1091 if (ShenandoahElasticTLAB) {
1092 // With Elastic TLABs, return the max allowed size, and let the allocation path
1093 // figure out the safe size for current allocation.
1094 return ShenandoahHeapRegion::max_tlab_size_bytes();
1095 } else {
1096 return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1097 }
1098 }
1099
1503 // If collection set has candidates, start evacuation.
1504 // Otherwise, bypass the rest of the cycle.
1505 if (!collection_set()->is_empty()) {
1506 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1507
1508 if (ShenandoahVerify) {
1509 verifier()->verify_before_evacuation();
1510 }
1511
1512 set_evacuation_in_progress(true);
1513 // From here on, we need to update references.
1514 set_has_forwarded_objects(true);
1515
1516 evacuate_and_update_roots();
1517
1518 if (ShenandoahPacing) {
1519 pacer()->setup_for_evac();
1520 }
1521
1522 if (ShenandoahVerify) {
1523 if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
1524 ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::CLDGRoots);
1525 verifier()->verify_roots_no_forwarded_except(types);
1526 } else {
1527 verifier()->verify_roots_no_forwarded();
1528 }
1529 verifier()->verify_during_evacuation();
1530 }
1531 } else {
1532 if (ShenandoahVerify) {
1533 verifier()->verify_after_concmark();
1534 }
1535
1536 if (VerifyAfterGC) {
1537 Universe::verify();
1538 }
1539 }
1540
1541 } else {
1542 concurrent_mark()->cancel();
1543 stop_concurrent_marking();
1544
1545 if (process_references()) {
1546 // Abandon reference processing right away: pre-cleaning must have failed.
1547 ReferenceProcessor *rp = ref_processor();
1548 rp->disable_discovery();
1569 }
1570
// Concurrent evacuation operation: drain the collection set with a parallel
// gang task in concurrent mode ('true').
void ShenandoahHeap::op_conc_evac() {
  ShenandoahEvacuationTask task(this, _collection_set, true);
  workers()->run_task(&task);
}
1575
// Stop-the-world evacuation operation: same task as op_conc_evac(), but run
// in non-concurrent mode ('false').
void ShenandoahHeap::op_stw_evac() {
  ShenandoahEvacuationTask task(this, _collection_set, false);
  workers()->run_task(&task);
}
1580
// Update-references operation; 'true' selects the concurrent mode of
// update_heap_references().
void ShenandoahHeap::op_updaterefs() {
  update_heap_references(true);
}
1584
// Cleanup operation: recycle trash regions back into the free set.
void ShenandoahHeap::op_cleanup() {
  free_set()->recycle_trash();
}
1588
// Gang task that evacuates and updates the roots that are safe to process
// concurrently with mutators: JNI handle roots and class-loader-data roots.
// NOTE(review): the SuspendibleThreadSetJoiner is held as a task member, so
// the suspendible thread set is joined for the lifetime of the task object
// (constructed/destructed on the initiating thread), not per worker.
class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
private:
  SuspendibleThreadSetJoiner _sts_joiner;
  ShenandoahJNIHandleRoots<true /*concurrent*/> _jni_roots;
  ShenandoahClassLoaderDataRoots<true /* concurrent */> _cld_roots;
public:
  ShenandoahConcurrentRootsEvacUpdateTask() :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Roots Task") {
  }

  void work(uint worker_id) {
    // Establish the evacuation OOM scope for this worker thread.
    ShenandoahEvacOOMScope oom;
    ShenandoahEvacuateUpdateRootsClosure cl;
    CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);

    // Walk JNI handle oops directly, and CLD oops via the CLD closure.
    _jni_roots.oops_do<ShenandoahEvacuateUpdateRootsClosure>(&cl);
    _cld_roots.cld_do(&clds);
  }
};
1608
// Concurrent roots operation: runs only while evacuation is in progress and
// the current mode supports concurrent root processing; otherwise a no-op.
void ShenandoahHeap::op_concurrent_roots() {
  if (is_evacuation_in_progress() &&
      ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
    ShenandoahConcurrentRootsEvacUpdateTask task;
    workers()->run_task(&task);
  }
}
1616
// Reset operation: clear the marking bitmap.
void ShenandoahHeap::op_reset() {
  reset_mark_bitmap();
}
1620
// Preclean operation: preclean discovered weak references during
// concurrent mark.
void ShenandoahHeap::op_preclean() {
  concurrent_mark()->preclean_weak_refs();
}
1624
// Traversal GC: initialization step, forwarded to the traversal collector.
void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}
1628
// Traversal GC: concurrent step, forwarded to the traversal collector.
void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}
1632
// Traversal GC: final step, forwarded to the traversal collector.
void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}
1636
1870 false, // Reference discovery is not atomic
1871 NULL, // No closure, should be installed before use
1872 true); // Scale worker threads
1873
1874 shenandoah_assert_rp_isalive_not_installed();
1875 }
1876
// Returns the GC tracer, owned by the collector policy.
GCTracer* ShenandoahHeap::tracer() {
  return shenandoah_policy()->tracer();
}
1880
// Reports TLAB usage from the free set. Per-thread accounting is not
// maintained here, so the 'thread' argument is intentionally unused.
size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  return _free_set->used();
}
1884
1885 bool ShenandoahHeap::try_cancel_gc() {
1886 while (true) {
1887 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1888 if (prev == CANCELLABLE) return true;
1889 else if (prev == CANCELLED) return false;
1890 assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1891 {
1892 // We need to provide a safepoint here, otherwise we might
1893 // spin forever if a SP is pending.
1894 ThreadBlockInVM sp(JavaThread::current());
1895 SpinPause();
1896 }
1897 }
1898 }
1899
1900 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1901 if (try_cancel_gc()) {
1902 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1903 log_info(gc)("%s", msg.buffer());
1904 Events::log(Thread::current(), "%s", msg.buffer());
1905 }
1906 }
1907
1908 uint ShenandoahHeap::max_workers() {
1909 return _max_workers;
2083
2084 template<class T>
2085 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2086 private:
2087 T cl;
2088 ShenandoahHeap* _heap;
2089 ShenandoahRegionIterator* _regions;
2090 bool _concurrent;
2091 public:
2092 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2093 AbstractGangTask("Concurrent Update References Task"),
2094 cl(T()),
2095 _heap(ShenandoahHeap::heap()),
2096 _regions(regions),
2097 _concurrent(concurrent) {
2098 }
2099
2100 void work(uint worker_id) {
2101 if (_concurrent) {
2102 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2103 ShenandoahSuspendibleThreadSetJoiner stsj;
2104 do_work();
2105 } else {
2106 ShenandoahParallelWorkerSession worker_session(worker_id);
2107 do_work();
2108 }
2109 }
2110
2111 private:
2112 void do_work() {
2113 ShenandoahHeapRegion* r = _regions->next();
2114 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2115 while (r != NULL) {
2116 HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2117 assert (top_at_start_ur >= r->bottom(), "sanity");
2118 if (r->is_active() && !r->is_cset()) {
2119 _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
2120 }
2121 if (ShenandoahPacing) {
2122 _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2123 }
2283 return true;
2284 }
2285
2286 if (is_bitmap_slice_committed(r, true)) {
2287 // Some other region from the group is still committed, meaning the bitmap
2288   // slice should stay committed, exit right away.
2289 return true;
2290 }
2291
2292 // Uncommit the bitmap slice:
2293 size_t slice = r->region_number() / _bitmap_regions_per_slice;
2294 size_t off = _bitmap_bytes_per_slice * slice;
2295 size_t len = _bitmap_bytes_per_slice;
2296 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2297 return false;
2298 }
2299 return true;
2300 }
2301
// Unconditionally synchronize the suspendible thread set at safepoint begin.
void ShenandoahHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}
2305
// Counterpart of safepoint_synchronize_begin(): release the suspendible
// thread set at safepoint end.
void ShenandoahHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}
2309
// Pause entry for init-mark: sets up STW collection counters and
// gross-pause timing phases (RAII), then executes the VM operation.
void ShenandoahHeap::vmop_entry_init_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);

  // Test hook: possibly simulate an allocation failure before the pause.
  try_inject_alloc_failure();
  VM_ShenandoahInitMark op;
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}
2319
2320 void ShenandoahHeap::vmop_entry_final_mark() {
2321 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2322 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2323 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2324
2325 try_inject_alloc_failure();
2326 VM_ShenandoahFinalMarkStartEvac op;
2327 VMThread::execute(&op); // jump to entry_final_mark under safepoint
2552 "concurrent evacuation");
2553
2554 try_inject_alloc_failure();
2555 op_conc_evac();
2556 }
2557
// Concurrent-phase entry for update-references: sets up phase timing,
// logging/event marking, and scales workers, then runs the operation.
void ShenandoahHeap::entry_updaterefs() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);

  static const char* msg = "Concurrent update references";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  try_inject_alloc_failure();
  op_updaterefs();
}
2572
// Concurrent-phase entry for concurrent root processing: sets up phase
// timing, logging/event marking, and scales workers, then runs the operation.
void ShenandoahHeap::entry_concurrent_roots() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_roots);

  static const char* msg = "Concurrent roots processing";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
                              "concurrent root processing");

  try_inject_alloc_failure();
  op_concurrent_roots();
}
2587
// Concurrent-phase entry for cleanup: phase timing and logging only;
// the cleanup operation itself runs without a worker scope.
void ShenandoahHeap::entry_cleanup() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);

  static const char* msg = "Concurrent cleanup";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup();
}
2600
2601 void ShenandoahHeap::entry_reset() {
2602 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2603
2604 static const char* msg = "Concurrent reset";
2605 GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2606 EventMark em("%s", msg);
2607
|