  }
  workers()->threads_do(&cl);
}

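// Delegate TLAB resizing to the common CollectedHeap implementation.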
void ShenandoahHeap::resize_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

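// Safepoint task: evacuate roots and update them to point at the new copies.
// Each worker handles the slice of roots that ShenandoahRootEvacuator assigns
// to its worker_id.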
class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootEvacuator* _rp;

public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahEvacuateUpdateRootsClosure<> cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
    _rp->roots_do(worker_id, &cl);
  }
};

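// Evacuate and update all roots that must be processed at a safepoint.
// Roots that the current cycle can process concurrently are excluded here
// (see the ShenandoahRootEvacuator flags below) and handled by the
// concurrent phase instead.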
void ShenandoahHeap::evacuate_and_update_roots() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
  {
    // Include concurrent roots if the current cycle cannot process those roots concurrently
    ShenandoahRootEvacuator rp(workers()->active_workers(),
                               ShenandoahPhaseTimings::init_evac,
                               !ShenandoahConcurrentRoots::should_do_concurrent_roots(),
                               !ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

// ...

/* ...
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return;
  }

  // Reset bitmap
  _aux_bit_map.clear();

  Stack<oop,mtGC> oop_stack;

  // First, we process GC roots according to current GC cycle. This populates the work stack with initial objects.
  ShenandoahHeapIterationRootScanner rp;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);

  // While concurrent root processing is in progress, weak roots may contain dead oops;
  // they must not be used for root scanning.
  if (is_concurrent_root_in_progress()) {
    rp.strong_roots_do(&oops);
  } else {
    rp.roots_do(&oops);
  }

  // Work through the oop stack to traverse heap.
  while (!oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");

  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// ...

      set_evacuation_in_progress(true);
      // From here on, we need to update references.
      set_has_forwarded_objects(true);

      if (!is_degenerated_gc_in_progress()) {
        if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
          ShenandoahCodeRoots::arm_nmethods();
        }
        evacuate_and_update_roots();
      }

      if (ShenandoahPacing) {
        pacer()->setup_for_evac();
      }

      if (ShenandoahVerify) {
        ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
        if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
          types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
          types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
          types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots);
        }

        if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
          types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
        }
        verifier()->verify_roots_no_forwarded_except(types);
        verifier()->verify_during_evacuation();
      }
    } else {
      if (ShenandoahVerify) {
        verifier()->verify_after_concmark();
      }

      if (VerifyAfterGC) {
        Universe::verify();
      }
    }

  } else {
    // If this cycle was updating references, we need to keep the has_forwarded_objects
    // flag on, for subsequent phases to deal with it.
    // ...
}

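// Stop-the-world variant of collection set evacuation.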
void ShenandoahHeap::op_stw_evac() {
  ShenandoahEvacuationTask task(this, _collection_set, false);
  workers()->run_task(&task);
}

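// Concurrent update-references phase: walk the heap and update references
// to point at the evacuated copies.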
void ShenandoahHeap::op_updaterefs() {
  update_heap_references(true);
}

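// Recycle immediately reclaimable ("trash") regions back into the free set.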
void ShenandoahHeap::op_cleanup() {
  free_set()->recycle_trash();
}

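// Task for evacuating/updating roots that can be processed concurrently with
// mutators: OopStorage-backed VM roots, optionally weak roots, class loader
// data roots, and string dedup roots.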
class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
private:
  ShenandoahVMRoots<true /*concurrent*/> _vm_roots;
  ShenandoahWeakRoots<true /*concurrent*/> _weak_roots;
  ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
  ShenandoahConcurrentStringDedupRoots _dedup_roots;
  bool _include_weak_roots;

public:
  ShenandoahConcurrentRootsEvacUpdateTask(bool include_weak_roots) :
    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Roots Task"),
    _include_weak_roots(include_weak_roots) {
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom;
    {
      // vm_roots and weak_roots are OopStorage backed roots; concurrent iteration
      // may race against OopStorage::release() calls.
      ShenandoahEvacUpdateOopStorageRootsClosure cl;
      _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl);

      if (_include_weak_roots) {
        _weak_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl);
      }
    }

    {
      ShenandoahEvacuateUpdateRootsClosure<> cl;
      CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
      _cld_roots.cld_do(&clds);
    }

    {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
      _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
    }
  }
};

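// Closure for concurrent cleanup of OopStorage-backed roots: live oops are
// evacuated/updated, dead ones are cleared, and _dead_counter records how
// many dead oops were seen. Definitions are out of line.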
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _mark_context;
  bool _evac_in_progress;
  Thread* const _thread;
  size_t _dead_counter;

public:
  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  size_t dead_counter() const;
  void reset_dead_counter();
};