28 #include "gc/shared/gcTraceTime.inline.hpp"
29 #include "gc/shared/parallelCleaning.hpp"
30
31 #include "gc/shenandoah/brooksPointer.hpp"
32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
38 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
45 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
46 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
48 #include "gc/shenandoah/shenandoahPartialGC.hpp"
49 #include "gc/shenandoah/shenandoahPacer.hpp"
50 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
51 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
52 #include "gc/shenandoah/shenandoahStringDedup.hpp"
53 #include "gc/shenandoah/shenandoahUtils.hpp"
54 #include "gc/shenandoah/shenandoahVerifier.hpp"
55 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
56 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
57 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
58
59 #include "runtime/vmThread.hpp"
60 #include "services/mallocTracker.hpp"
61
// Cache the heap singleton at construction; the closure is instantiated often, so avoid repeated ShenandoahHeap::heap() calls.
ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
63
64 #ifdef ASSERT
65 template <class T>
66 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
67 T o = RawAccess<>::oop_load(p);
68 if (! CompressedOops::is_null(o)) {
185 ShenandoahHeapLocker locker(lock());
186 for (size_t i = 0; i < _num_regions; i++) {
187 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
188 (HeapWord*) pgc_rs.base() + reg_size_words * i,
189 reg_size_words,
190 i,
191 i < num_committed_regions);
192
193 _complete_top_at_mark_starts_base[i] = r->bottom();
194 _next_top_at_mark_starts_base[i] = r->bottom();
195 _regions[i] = r;
196 assert(!collection_set()->is_in(i), "New region should not be in collection set");
197 }
198
199 _free_set->rebuild();
200 }
201
202 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
203 "misaligned heap: "PTR_FORMAT, p2i(base()));
204
205 LogTarget(Trace, gc, region) lt;
206 if (lt.is_enabled()) {
207 ResourceMark rm;
208 LogStream ls(lt);
209 log_trace(gc, region)("All Regions");
210 print_heap_regions_on(&ls);
211 log_trace(gc, region)("Free Regions");
212 _free_set->print_on(&ls);
213 }
214
215 // The call below uses stuff (the SATB* things) that are in G1, but probably
216 // belong into a shared location.
217 ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
218 SATB_Q_FL_lock,
219 20 /*G1SATBProcessCompletedThreshold */,
220 Shared_SATB_Q_lock);
221
222 // Reserve space for prev and next bitmap.
223 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
224 _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
225 _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
226 _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
227
228 size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
229
230 guarantee(bitmap_bytes_per_region != 0,
231 "Bitmap bytes per region should not be zero");
232 guarantee(is_power_of_2(bitmap_bytes_per_region),
233 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
234
290 }
291
292 _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
293 _complete_mark_bit_map = &_mark_bit_map0;
294
295 _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
296 _next_mark_bit_map = &_mark_bit_map1;
297
298 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
299 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
300 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
301 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
302 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
303
304 if (UseShenandoahMatrix) {
305 _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
306 } else {
307 _connection_matrix = NULL;
308 }
309
310 _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
311 new ShenandoahPartialGC(this, _num_regions) :
312 NULL;
313
314 _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ?
315 new ShenandoahTraversalGC(this, _num_regions) :
316 NULL;
317
318 _monitoring_support = new ShenandoahMonitoringSupport(this);
319
320 _phase_timings = new ShenandoahPhaseTimings();
321
322 if (ShenandoahAllocationTrace) {
323 _alloc_tracker = new ShenandoahAllocTracker();
324 }
325
326 ShenandoahStringDedup::initialize();
327
328 _concurrent_gc_thread = new ShenandoahConcurrentThread();
329
330 ShenandoahCodeRoots::initialize();
331
332 log_info(gc, init)("Safepointing mechanism: %s",
333 SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
334 (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
335
336 return JNI_OK;
337 }
338
339 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
340 CollectedHeap(),
341 _shenandoah_policy(policy),
342 _soft_ref_policy(),
343 _regions(NULL),
344 _free_set(NULL),
345 _collection_set(NULL),
346 _update_refs_iterator(ShenandoahRegionIterator(this)),
347 _bytes_allocated_since_gc_start(0),
348 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
349 _ref_processor(NULL),
350 _next_top_at_mark_starts(NULL),
351 _next_top_at_mark_starts_base(NULL),
352 _complete_top_at_mark_starts(NULL),
353 _complete_top_at_mark_starts_base(NULL),
354 _mark_bit_map0(),
355 _mark_bit_map1(),
356 _aux_bit_map(),
357 _connection_matrix(NULL),
358 _verifier(NULL),
359 _pacer(NULL),
360 _used_at_last_gc(0),
361 _alloc_seq_at_last_gc_start(0),
362 _alloc_seq_at_last_gc_end(0),
363 _safepoint_workers(NULL),
364 #ifdef ASSERT
365 _heap_expansion_count(0),
366 #endif
367 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
368 _phase_timings(NULL),
369 _alloc_tracker(NULL),
370 _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
371 _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
372 _memory_pool(NULL)
373 {
374 log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
375 log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
376 log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
377
378 _scm = new ShenandoahConcurrentMark();
379 _full_gc = new ShenandoahMarkCompact();
380 _used = 0;
381
382 _max_workers = MAX2(_max_workers, 1U);
383 _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
413 if (heap->is_bitmap_slice_committed(region)) {
414 HeapWord* bottom = region->bottom();
415 HeapWord* top = heap->next_top_at_mark_start(region->bottom());
416 if (top > bottom) {
417 heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
418 }
419 assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
420 }
421 region = _regions.next();
422 }
423 }
424 };
425
// Clear the "next" marking bitmap in parallel, region by region,
// so the upcoming marking cycle starts from a clean slate.
void ShenandoahHeap::reset_next_mark_bitmap() {
  // The reset is spread over the worker gang; sanity-check the active worker count.
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTask task(region_iterator());
  _workers->run_task(&task);
}
432
433 bool ShenandoahHeap::is_next_bitmap_clear() {
434 for (size_t idx = 0; idx < _num_regions; idx++) {
435 ShenandoahHeapRegion* r = get_region(idx);
436 if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
437 return false;
438 }
439 }
440 return true;
441 }
442
// True iff no bit is set in the "next" marking bitmap within [start, end).
bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
446
// True iff no bit is set in the "complete" marking bitmap within [start, end).
bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
450
451 void ShenandoahHeap::print_on(outputStream* st) const {
452 st->print_cr("Shenandoah Heap");
453 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
454 capacity() / K, committed() / K, used() / K);
455 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
456 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
457
458 st->print("Status: ");
459 if (has_forwarded_objects()) st->print("has forwarded objects, ");
460 if (is_concurrent_mark_in_progress()) st->print("marking, ");
461 if (is_evacuation_in_progress()) st->print("evacuating, ");
462 if (is_update_refs_in_progress()) st->print("updating refs, ");
463 if (is_concurrent_partial_in_progress()) st->print("partial, ");
464 if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
465 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
466 if (is_full_gc_in_progress()) st->print("full gc, ");
467 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
468
469 if (cancelled_concgc()) {
470 st->print("conc gc cancelled");
471 } else {
472 st->print("not cancelled");
473 }
474 st->cr();
475
476 st->print_cr("Reserved region:");
477 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
478 p2i(reserved_region().start()),
479 p2i(reserved_region().end()));
480
481 if (UseShenandoahMatrix) {
482 st->print_cr("Matrix:");
483
783 private:
784 ShenandoahHeap* _heap;
785 Thread* _thread;
786 public:
787 ShenandoahEvacuateUpdateRootsClosure() :
788 _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
789 }
790
791 private:
792 template <class T>
793 void do_oop_work(T* p) {
794 assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
795
796 T o = RawAccess<>::oop_load(p);
797 if (! CompressedOops::is_null(o)) {
798 oop obj = CompressedOops::decode_not_null(o);
799 if (_heap->in_collection_set(obj)) {
800 shenandoah_assert_marked_complete(p, obj);
801 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
802 if (oopDesc::unsafe_equals(resolved, obj)) {
803 bool evac;
804 resolved = _heap->evacuate_object(obj, _thread, evac);
805 }
806 RawAccess<OOP_NOT_NULL>::oop_store(p, resolved);
807 }
808 }
809 }
810
811 public:
812 void do_oop(oop* p) {
813 do_oop_work(p);
814 }
815 void do_oop(narrowOop* p) {
816 do_oop_work(p);
817 }
818 };
819
820 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
821 private:
822 ShenandoahHeap* _heap;
823 Thread* _thread;
824 public:
825 ShenandoahEvacuateRootsClosure() :
826 _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
827 }
828
829 private:
830 template <class T>
831 void do_oop_work(T* p) {
832 T o = RawAccess<>::oop_load(p);
833 if (! CompressedOops::is_null(o)) {
834 oop obj = CompressedOops::decode_not_null(o);
835 if (_heap->in_collection_set(obj)) {
836 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
837 if (oopDesc::unsafe_equals(resolved, obj)) {
838 bool evac;
839 _heap->evacuate_object(obj, _thread, evac);
840 }
841 }
842 }
843 }
844
845 public:
846 void do_oop(oop* p) {
847 do_oop_work(p);
848 }
849 void do_oop(narrowOop* p) {
850 do_oop_work(p);
851 }
852 };
853
854 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
855 private:
856 ShenandoahHeap* const _heap;
857 Thread* const _thread;
858 public:
859 ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
860 _heap(heap), _thread(Thread::current()) {}
861
862 void do_object(oop p) {
863 shenandoah_assert_marked_complete(NULL, p);
864 if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
865 bool evac;
866 _heap->evacuate_object(p, _thread, evac);
867 }
868 }
869 };
870
871 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
872 private:
873 ShenandoahHeap* const _sh;
874 ShenandoahCollectionSet* const _cs;
875 ShenandoahSharedFlag _claimed_codecache;
876
877 public:
878 ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
879 ShenandoahCollectionSet* cs) :
880 AbstractGangTask("Parallel Evacuation Task"),
881 _cs(cs),
882 _sh(sh)
883 {}
884
885 void work(uint worker_id) {
886
1596 }
1597
// Concurrent cleanup work: return trashed regions to the free set.
void ShenandoahHeap::op_cleanup() {
  ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
  free_set()->recycle_trash();

  // Allocations happen during cleanup, record peak after the phase:
  shenandoahPolicy()->record_peak_occupancy();
}
1605
// Cleanup variant that additionally resets the next-marking bitmap,
// preparing it for the following marking cycle.
void ShenandoahHeap::op_cleanup_bitmaps() {
  // Regular trash recycling first.
  op_cleanup();

  ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
  reset_next_mark_bitmap();

  // Allocations happen during bitmap cleanup, record peak after the phase:
  shenandoahPolicy()->record_peak_occupancy();
}
1615
// Concurrently preclean discovered weak references ahead of the final-mark pause.
void ShenandoahHeap::op_preclean() {
  concurrentMark()->preclean_weak_refs();

  // Allocations happen during concurrent preclean, record peak after the phase:
  shenandoahPolicy()->record_peak_occupancy();
}
1622
// Init-partial pause work, delegated to the partial GC driver.
void ShenandoahHeap::op_init_partial() {
  partial_gc()->init_partial_collection();
}
1626
// Concurrent partial collection work, delegated to the partial GC driver.
void ShenandoahHeap::op_partial() {
  partial_gc()->concurrent_partial_collection();
}
1630
// Final-partial pause work, delegated to the partial GC driver.
void ShenandoahHeap::op_final_partial() {
  partial_gc()->final_partial_collection();
}
1634
// Init-traversal pause work, delegated to the traversal GC driver.
void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}
1638
// Concurrent traversal work, delegated to the traversal GC driver.
void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}
1642
// Final-traversal pause work, delegated to the traversal GC driver.
void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}
1646
// Full (mark-compact) collection for the given cause, delegated to the full GC driver.
void ShenandoahHeap::op_full(GCCause::Cause cause) {
  full_gc()->do_it(cause);
}
1650
1651 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1652 // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1653 // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1654 // some phase, we have to upgrade the Degenerate GC to Full GC.
1655
1656 clear_cancelled_concgc();
1657
1658 size_t used_before = used();
1659
1660 switch (point) {
1661 case _degenerated_partial:
1662 case _degenerated_evac:
1663 // Not possible to degenerate from here, upgrade to Full GC right away.
1664 cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1665 op_degenerated_fail();
1666 return;
1667
1668 // The cases below form the Duff's-like device: it describes the actual GC cycle,
1669 // but enters it at different points, depending on which concurrent phase had
1670 // degenerated.
1671
1672 case _degenerated_traversal:
1673 {
1674 ShenandoahHeapLocker locker(lock());
1675 collection_set()->clear_current_index();
1676 for (size_t i = 0; i < collection_set()->count(); i++) {
1677 ShenandoahHeapRegion* r = collection_set()->next();
1678 r->make_regular_bypass();
1679 }
1680 collection_set()->clear();
1681 }
1682 op_final_traversal();
1683 op_cleanup_bitmaps();
1684 return;
1685
1686 case _degenerated_outside_cycle:
1687 if (shenandoahPolicy()->can_do_traversal_gc()) {
1688 // Not possible to degenerate from here, upgrade to Full GC right away.
1689 cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1690 op_degenerated_fail();
1691 return;
1692 }
1693 op_init_mark();
1694 if (cancelled_concgc()) {
1695 op_degenerated_fail();
1696 return;
1697 }
1698
1699 case _degenerated_mark:
1700 op_final_mark();
1701 if (cancelled_concgc()) {
1702 op_degenerated_fail();
1703 return;
1803 }
1804 }
1805
// Publish the GC state byte into every Java thread's thread-local data,
// so barrier fast paths can read it without consulting the heap object.
void ShenandoahHeap::set_gc_state_all_threads(char state) {
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ShenandoahThreadLocalData::set_gc_state(t, state);
  }
}
1811
// Set or clear the given GC state bits and propagate the new state to all threads.
void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
  // State changes only happen at safepoints, so no thread can observe a torn update.
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  set_gc_state_all_threads(_gc_state.raw_value());
}
1817
// Toggle concurrent marking: flip the MARKING state bit and keep the
// SATB barrier active exactly while marking is in progress.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  set_gc_state_mask(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1822
// Toggle partial collection: PARTIAL travels together with HAS_FORWARDED,
// and the SATB barrier is active while the partial cycle runs.
void ShenandoahHeap::set_concurrent_partial_in_progress(bool in_progress) {

  set_gc_state_mask(PARTIAL | HAS_FORWARDED, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1828
// Toggle traversal collection: TRAVERSAL travels together with HAS_FORWARDED,
// and the SATB barrier is active while the traversal cycle runs.
void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1833
// Toggle the EVACUATION state bit; only legal at a Shenandoah safepoint.
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state_mask(EVACUATION, in_progress);
}
1838
// The raw allocation reserves one extra word ahead of the object for the
// Brooks forwarding pointer. Shift the object start past that word, install
// the initial (self-referencing) forwarding pointer, and return the real
// object address.
HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}
1845
// Every object carries one extra word for the Brooks forwarding pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}
2180 } else {
2181 // Use ParallelGCThreads inside safepoints
2182 assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints");
2183 }
2184 } else {
2185 if (UseDynamicNumberOfGCThreads ||
2186 (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2187 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2188 } else {
2189 // Use ConcGCThreads outside safepoints
2190 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2191 }
2192 }
2193 }
2194 #endif
2195
// Accessor for the inter-region connection matrix; NULL unless UseShenandoahMatrix is set.
ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
  return _connection_matrix;
}
2199
// Accessor for the partial GC driver; may be NULL when the policy does not support partial GC.
ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
  return _partial_gc;
}
2203
// Accessor for the traversal GC driver; may be NULL when the policy does not support traversal GC.
ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
  return _traversal_gc;
}
2207
// Accessor for the heap verifier; only legal when ShenandoahVerify is enabled,
// since the verifier is not constructed otherwise.
ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != NULL, "sanity");
  return _verifier;
}
2213
2214 template<class T>
2215 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2216 private:
2217 T cl;
2218 ShenandoahHeap* _heap;
2219 ShenandoahRegionIterator _regions;
2220 bool _concurrent;
2221 public:
2222 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator regions, bool concurrent) :
2223 AbstractGangTask("Concurrent Update References Task"),
// Issue the Init Update Refs VM operation. The "gross" phase timers start
// before the VM op is scheduled, so they include time-to-safepoint.
void ShenandoahHeap::vmop_entry_init_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);

  // Diagnostic hook: possibly inject an allocation failure before the pause.
  try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op;
  VMThread::execute(&op);
}
2505
// Issue the Final Update Refs VM operation; "gross" timers include time-to-safepoint.
void ShenandoahHeap::vmop_entry_final_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op;
  VMThread::execute(&op);
}
2515
// Issue the Init Partial GC VM operation; "gross" timers include time-to-safepoint.
void ShenandoahHeap::vmop_entry_init_partial() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_partial_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitPartialGC op;
  VMThread::execute(&op);
}
2525
// Issue the Final Partial GC VM operation; "gross" timers include time-to-safepoint.
void ShenandoahHeap::vmop_entry_final_partial() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_partial_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalPartialGC op;
  VMThread::execute(&op);
}
2535
// Issue the Init Traversal GC VM operation; "gross" timers include time-to-safepoint.
void ShenandoahHeap::vmop_entry_init_traversal() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitTraversalGC op;
  VMThread::execute(&op);
}
2545
// Issue the Final Traversal GC VM operation; "gross" timers include time-to-safepoint.
void ShenandoahHeap::vmop_entry_final_traversal() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalTraversalGC op;
  VMThread::execute(&op);
}
2555
2624 EventMark em("%s", msg);
2625
2626 // No workers used in this phase, no setup required
2627
2628 op_init_updaterefs();
2629 }
2630
// Final Update Refs pause body: accounts net pause time, emits logging/JFR
// event, sizes the worker gang, then runs the actual pause work.
void ShenandoahHeap::entry_final_updaterefs() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);

  static const char* msg = "Pause Final Update Refs";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());

  op_final_updaterefs();
}
2643
// Init Partial pause body: accounts net pause time, emits logging/JFR event,
// sizes the worker gang, then runs the actual pause work.
void ShenandoahHeap::entry_init_partial() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_partial_gc);

  static const char* msg = "Pause Init Partial";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_partial());

  op_init_partial();
}
2656
// Final Partial pause body: accounts net pause time, emits logging/JFR event,
// sizes the worker gang, then runs the actual pause work.
void ShenandoahHeap::entry_final_partial() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_partial_gc);

  static const char* msg = "Pause Final Partial";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_partial());

  op_final_partial();
}
2669
// Init Traversal pause body: accounts net pause time, emits logging/JFR event,
// sizes the worker gang, then runs the actual pause work.
void ShenandoahHeap::entry_init_traversal() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);

  static const char* msg = "Pause Init Traversal";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());

  op_init_traversal();
}
2682
2683 void ShenandoahHeap::entry_final_traversal() {
2684 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2685 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2686
2687 static const char* msg = "Pause Final Traversal";
2688 GCTraceTime(Info, gc) time(msg, gc_timer());
2689 EventMark em("%s", msg);
2760 EventMark em("%s", msg);
2761
2762 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2763
2764 try_inject_alloc_failure();
2765 op_updaterefs();
2766 }
// Concurrent cleanup entry: accounts phase time, emits logging/JFR event,
// then recycles trash. Runs without a worker setup.
void ShenandoahHeap::entry_cleanup() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);

  static const char* msg = "Concurrent cleanup";
  GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup();
}
2779
// Concurrent cleanup entry that also resets marking bitmaps; unlike
// entry_cleanup(), this needs workers for the parallel bitmap reset.
void ShenandoahHeap::entry_cleanup_bitmaps() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);

  static const char* msg = "Concurrent cleanup";
  GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());

  try_inject_alloc_failure();
  op_cleanup_bitmaps();
}
2792
// Concurrent precleaning entry; only runs when precleaning is enabled and
// reference processing is requested for this cycle.
void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    static const char* msg = "Concurrent precleaning";
    GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
    EventMark em("%s", msg);

    ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);

    ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());

    try_inject_alloc_failure();
    op_preclean();
  }
}
2807
// Concurrent partial collection entry: logging/JFR event, concurrent
// collection counters, worker setup, then the partial GC work itself.
void ShenandoahHeap::entry_partial() {
  static const char* msg = "Concurrent partial";
  GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
  EventMark em("%s", msg);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_partial());

  try_inject_alloc_failure();
  op_partial();
}
2820
// Concurrent traversal collection entry: logging/JFR event, concurrent
// collection counters, worker setup, then the traversal GC work itself.
void ShenandoahHeap::entry_traversal() {
  static const char* msg = "Concurrent traversal";
  GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
  EventMark em("%s", msg);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());

  try_inject_alloc_failure();
  op_traversal();
}
2833
// Diagnostic support: with ShenandoahAllocFailureALot, randomly (roughly 5%
// of calls) pretend an allocation failure happened, to exercise the failure paths.
void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_concgc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    // Give mutators a moment to trip over the injected failure.
    os::naked_short_sleep(1);
    if (cancelled_concgc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}
2843
// One-shot check: try_unset() ensures at most one caller observes the injected failure.
bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}
2884 _index(0),
2885 _heap(heap) {}
2886
// More regions remain while the cursor has not moved past the last region.
bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}
2890
// Hand out a fresh iterator over all heap regions.
ShenandoahRegionIterator ShenandoahHeap::region_iterator() const {
  return ShenandoahRegionIterator();
}
2894
2895 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2896 ShenandoahRegionIterator regions = region_iterator();
2897 ShenandoahHeapRegion* r = regions.next();
2898 while (r != NULL) {
2899 if (cl.heap_region_do(r)) {
2900 break;
2901 }
2902 r = regions.next();
2903 }
2904 }
2905
// Raw GC state bits, as also published into each thread's local copy.
char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}
|
28 #include "gc/shared/gcTraceTime.inline.hpp"
29 #include "gc/shared/parallelCleaning.hpp"
30
31 #include "gc/shenandoah/brooksPointer.hpp"
32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
38 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
45 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
46 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
48 #include "gc/shenandoah/shenandoahPacer.hpp"
49 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
50 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
51 #include "gc/shenandoah/shenandoahStringDedup.hpp"
52 #include "gc/shenandoah/shenandoahUtils.hpp"
53 #include "gc/shenandoah/shenandoahVerifier.hpp"
54 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
55 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
56 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
57
58 #include "runtime/vmThread.hpp"
59 #include "services/mallocTracker.hpp"
60
// Cache the heap singleton at construction; the closure is instantiated often, so avoid repeated ShenandoahHeap::heap() calls.
ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
62
63 #ifdef ASSERT
64 template <class T>
65 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
66 T o = RawAccess<>::oop_load(p);
67 if (! CompressedOops::is_null(o)) {
184 ShenandoahHeapLocker locker(lock());
185 for (size_t i = 0; i < _num_regions; i++) {
186 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
187 (HeapWord*) pgc_rs.base() + reg_size_words * i,
188 reg_size_words,
189 i,
190 i < num_committed_regions);
191
192 _complete_top_at_mark_starts_base[i] = r->bottom();
193 _next_top_at_mark_starts_base[i] = r->bottom();
194 _regions[i] = r;
195 assert(!collection_set()->is_in(i), "New region should not be in collection set");
196 }
197
198 _free_set->rebuild();
199 }
200
201 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
202 "misaligned heap: "PTR_FORMAT, p2i(base()));
203
204 // The call below uses stuff (the SATB* things) that are in G1, but probably
205 // belong into a shared location.
206 ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
207 SATB_Q_FL_lock,
208 20 /*G1SATBProcessCompletedThreshold */,
209 Shared_SATB_Q_lock);
210
211 // Reserve space for prev and next bitmap.
212 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
213 _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
214 _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
215 _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
216
217 size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
218
219 guarantee(bitmap_bytes_per_region != 0,
220 "Bitmap bytes per region should not be zero");
221 guarantee(is_power_of_2(bitmap_bytes_per_region),
222 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
223
279 }
280
281 _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
282 _complete_mark_bit_map = &_mark_bit_map0;
283
284 _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
285 _next_mark_bit_map = &_mark_bit_map1;
286
287 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
288 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
289 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
290 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
291 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
292
293 if (UseShenandoahMatrix) {
294 _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
295 } else {
296 _connection_matrix = NULL;
297 }
298
299 _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ?
300 new ShenandoahTraversalGC(this, _num_regions) :
301 NULL;
302
303 _monitoring_support = new ShenandoahMonitoringSupport(this);
304
305 _phase_timings = new ShenandoahPhaseTimings();
306
307 if (ShenandoahAllocationTrace) {
308 _alloc_tracker = new ShenandoahAllocTracker();
309 }
310
311 ShenandoahStringDedup::initialize();
312
313 _concurrent_gc_thread = new ShenandoahConcurrentThread();
314
315 ShenandoahCodeRoots::initialize();
316
317 LogTarget(Trace, gc, region) lt;
318 if (lt.is_enabled()) {
319 ResourceMark rm;
320 LogStream ls(lt);
321 log_trace(gc, region)("All Regions");
322 print_heap_regions_on(&ls);
323 log_trace(gc, region)("Free Regions");
324 _free_set->print_on(&ls);
325 }
326
327 log_info(gc, init)("Safepointing mechanism: %s",
328 SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
329 (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
330
331 return JNI_OK;
332 }
333
334 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
335 CollectedHeap(),
336 _shenandoah_policy(policy),
337 _soft_ref_policy(),
338 _regions(NULL),
339 _free_set(NULL),
340 _collection_set(NULL),
341 _update_refs_iterator(ShenandoahRegionIterator(this)),
342 _bytes_allocated_since_gc_start(0),
343 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
344 _ref_processor(NULL),
345 _next_top_at_mark_starts(NULL),
346 _next_top_at_mark_starts_base(NULL),
347 _complete_top_at_mark_starts(NULL),
348 _complete_top_at_mark_starts_base(NULL),
349 _mark_bit_map0(),
350 _mark_bit_map1(),
351 _aux_bit_map(),
352 _connection_matrix(NULL),
353 _verifier(NULL),
354 _pacer(NULL),
355 _used_at_last_gc(0),
356 _alloc_seq_at_last_gc_start(0),
357 _alloc_seq_at_last_gc_end(0),
358 _safepoint_workers(NULL),
359 _gc_cycle_mode(),
360 #ifdef ASSERT
361 _heap_expansion_count(0),
362 #endif
363 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
364 _phase_timings(NULL),
365 _alloc_tracker(NULL),
366 _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
367 _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
368 _memory_pool(NULL)
369 {
370 log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
371 log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
372 log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
373
374 _scm = new ShenandoahConcurrentMark();
375 _full_gc = new ShenandoahMarkCompact();
376 _used = 0;
377
378 _max_workers = MAX2(_max_workers, 1U);
379 _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
409 if (heap->is_bitmap_slice_committed(region)) {
410 HeapWord* bottom = region->bottom();
411 HeapWord* top = heap->next_top_at_mark_start(region->bottom());
412 if (top > bottom) {
413 heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
414 }
415 assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
416 }
417 region = _regions.next();
418 }
419 }
420 };
421
422 void ShenandoahHeap::reset_next_mark_bitmap() {
423 assert_gc_workers(_workers->active_workers());
424
425 ShenandoahResetNextBitmapTask task(region_iterator());
426 _workers->run_task(&task);
427 }
428
429 class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
430 private:
431 ShenandoahRegionIterator _regions;
432
433 public:
434 ShenandoahResetNextBitmapTraversalTask(ShenandoahRegionIterator regions) :
435 AbstractGangTask("Parallel Reset Bitmap Task for Traversal"),
436 _regions(regions) {
437 }
438
439 void work(uint worker_id) {
440 ShenandoahHeap* heap = ShenandoahHeap::heap();
441 ShenandoahHeapRegionSet* traversal_set = heap->traversal_gc()->traversal_set();
442 ShenandoahHeapRegion* region = _regions.next();
443 while (region != NULL) {
444 if (heap->is_bitmap_slice_committed(region)) {
445 if (traversal_set->is_in(region) && !region->is_trash()) {
446 ShenandoahHeapLocker locker(heap->lock());
447 HeapWord* bottom = region->bottom();
448 HeapWord* top = heap->next_top_at_mark_start(bottom);
449 assert(top <= region->top(),
450 "TAMS must smaller/equals than top: TAMS: "PTR_FORMAT", top: "PTR_FORMAT,
451 p2i(top), p2i(region->top()));
452 if (top > bottom) {
453 heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
454 heap->set_complete_top_at_mark_start(bottom, top);
455 heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
456 heap->set_next_top_at_mark_start(bottom, bottom);
457 }
458 }
459 assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
460 "need clear next bitmap");
461 }
462 region = _regions.next();
463 }
464 }
465 };
466
467 void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
468 assert_gc_workers(_workers->active_workers());
469
470 ShenandoahResetNextBitmapTraversalTask task(region_iterator());
471 _workers->run_task(&task);
472 }
473
474 bool ShenandoahHeap::is_next_bitmap_clear() {
475 for (size_t idx = 0; idx < _num_regions; idx++) {
476 ShenandoahHeapRegion* r = get_region(idx);
477 if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
478 return false;
479 }
480 }
481 return true;
482 }
483
// Returns true iff no bits are set in the next marking bitmap within [start, end).
bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
487
// Returns true iff no bits are set in the complete marking bitmap within [start, end).
bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
491
492 void ShenandoahHeap::print_on(outputStream* st) const {
493 st->print_cr("Shenandoah Heap");
494 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
495 capacity() / K, committed() / K, used() / K);
496 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
497 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
498
499 st->print("Status: ");
500 if (has_forwarded_objects()) st->print("has forwarded objects, ");
501 if (is_concurrent_mark_in_progress()) st->print("marking, ");
502 if (is_evacuation_in_progress()) st->print("evacuating, ");
503 if (is_update_refs_in_progress()) st->print("updating refs, ");
504 if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
505 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
506 if (is_full_gc_in_progress()) st->print("full gc, ");
507 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
508
509 if (cancelled_concgc()) {
510 st->print("conc gc cancelled");
511 } else {
512 st->print("not cancelled");
513 }
514 st->cr();
515
516 st->print_cr("Reserved region:");
517 st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
518 p2i(reserved_region().start()),
519 p2i(reserved_region().end()));
520
521 if (UseShenandoahMatrix) {
522 st->print_cr("Matrix:");
523
823 private:
824 ShenandoahHeap* _heap;
825 Thread* _thread;
826 public:
827 ShenandoahEvacuateUpdateRootsClosure() :
828 _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
829 }
830
831 private:
832 template <class T>
833 void do_oop_work(T* p) {
834 assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
835
836 T o = RawAccess<>::oop_load(p);
837 if (! CompressedOops::is_null(o)) {
838 oop obj = CompressedOops::decode_not_null(o);
839 if (_heap->in_collection_set(obj)) {
840 shenandoah_assert_marked_complete(p, obj);
841 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
842 if (oopDesc::unsafe_equals(resolved, obj)) {
843 resolved = _heap->evacuate_object(obj, _thread);
844 }
845 RawAccess<OOP_NOT_NULL>::oop_store(p, resolved);
846 }
847 }
848 }
849
850 public:
851 void do_oop(oop* p) {
852 do_oop_work(p);
853 }
854 void do_oop(narrowOop* p) {
855 do_oop_work(p);
856 }
857 };
858
859 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
860 private:
861 ShenandoahHeap* _heap;
862 Thread* _thread;
863 public:
864 ShenandoahEvacuateRootsClosure() :
865 _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
866 }
867
868 private:
869 template <class T>
870 void do_oop_work(T* p) {
871 T o = RawAccess<>::oop_load(p);
872 if (! CompressedOops::is_null(o)) {
873 oop obj = CompressedOops::decode_not_null(o);
874 if (_heap->in_collection_set(obj)) {
875 oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
876 if (oopDesc::unsafe_equals(resolved, obj)) {
877 _heap->evacuate_object(obj, _thread);
878 }
879 }
880 }
881 }
882
883 public:
884 void do_oop(oop* p) {
885 do_oop_work(p);
886 }
887 void do_oop(narrowOop* p) {
888 do_oop_work(p);
889 }
890 };
891
892 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
893 private:
894 ShenandoahHeap* const _heap;
895 Thread* const _thread;
896 public:
897 ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
898 _heap(heap), _thread(Thread::current()) {}
899
900 void do_object(oop p) {
901 shenandoah_assert_marked_complete(NULL, p);
902 if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
903 _heap->evacuate_object(p, _thread);
904 }
905 }
906 };
907
908 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
909 private:
910 ShenandoahHeap* const _sh;
911 ShenandoahCollectionSet* const _cs;
912 ShenandoahSharedFlag _claimed_codecache;
913
914 public:
915 ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
916 ShenandoahCollectionSet* cs) :
917 AbstractGangTask("Parallel Evacuation Task"),
918 _cs(cs),
919 _sh(sh)
920 {}
921
922 void work(uint worker_id) {
923
1633 }
1634
1635 void ShenandoahHeap::op_cleanup() {
1636 ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1637 free_set()->recycle_trash();
1638
1639 // Allocations happen during cleanup, record peak after the phase:
1640 shenandoahPolicy()->record_peak_occupancy();
1641 }
1642
1643 void ShenandoahHeap::op_cleanup_bitmaps() {
1644 op_cleanup();
1645
1646 ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1647 reset_next_mark_bitmap();
1648
1649 // Allocations happen during bitmap cleanup, record peak after the phase:
1650 shenandoahPolicy()->record_peak_occupancy();
1651 }
1652
1653 void ShenandoahHeap::op_cleanup_traversal() {
1654
1655 {
1656 ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1657 reset_next_mark_bitmap_traversal();
1658 }
1659
1660 op_cleanup();
1661
1662 // Allocations happen during bitmap cleanup, record peak after the phase:
1663 shenandoahPolicy()->record_peak_occupancy();
1664 }
1665
// Concurrent precleaning of discovered weak references, ahead of final mark.
void ShenandoahHeap::op_preclean() {
  concurrentMark()->preclean_weak_refs();

  // Allocations happen during concurrent preclean, record peak after the phase:
  shenandoahPolicy()->record_peak_occupancy();
}
1672
// Delegate the Init Traversal pause work to the traversal GC.
void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}
1676
// Delegate the concurrent traversal phase to the traversal GC.
void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}
1680
// Delegate the Final Traversal pause work to the traversal GC.
void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}
1684
// Run a Full GC (stop-the-world mark-compact) for the given cause.
void ShenandoahHeap::op_full(GCCause::Cause cause) {
  full_gc()->do_it(cause);
}
1688
1689 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1690 // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1691 // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1692 // some phase, we have to upgrade the Degenerate GC to Full GC.
1693
1694 clear_cancelled_concgc();
1695
1696 size_t used_before = used();
1697
1698 switch (point) {
1699 case _degenerated_evac:
1700 // Not possible to degenerate from here, upgrade to Full GC right away.
1701 cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1702 op_degenerated_fail();
1703 return;
1704
1705 // The cases below form the Duff's-like device: it describes the actual GC cycle,
1706 // but enters it at different points, depending on which concurrent phase had
1707 // degenerated.
1708
1709 case _degenerated_traversal:
1710 {
1711 ShenandoahHeapLocker locker(lock());
1712 collection_set()->clear_current_index();
1713 for (size_t i = 0; i < collection_set()->count(); i++) {
1714 ShenandoahHeapRegion* r = collection_set()->next();
1715 r->make_regular_bypass();
1716 }
1717 collection_set()->clear();
1718 }
1719 op_final_traversal();
1720 op_cleanup_traversal();
1721 return;
1722
1723 case _degenerated_outside_cycle:
1724 if (shenandoahPolicy()->can_do_traversal_gc()) {
1725 // Not possible to degenerate from here, upgrade to Full GC right away.
1726 cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1727 op_degenerated_fail();
1728 return;
1729 }
1730 op_init_mark();
1731 if (cancelled_concgc()) {
1732 op_degenerated_fail();
1733 return;
1734 }
1735
1736 case _degenerated_mark:
1737 op_final_mark();
1738 if (cancelled_concgc()) {
1739 op_degenerated_fail();
1740 return;
1840 }
1841 }
1842
// Publish the given gc-state byte into every Java thread's thread-local data,
// so barrier fast paths see the new state. Typically reached via
// set_gc_state_mask(), which asserts a Shenandoah safepoint.
void ShenandoahHeap::set_gc_state_all_threads(char state) {
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ShenandoahThreadLocalData::set_gc_state(t, state);
  }
}
1848
// Set or clear the given bits in the global gc-state, then propagate the new
// raw value to all Java threads. Must run at a Shenandoah safepoint (asserted)
// so every thread observes a consistent state.
void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
  _gc_state.set_cond(mask, value);
  set_gc_state_all_threads(_gc_state.raw_value());
}
1854
// Toggle the MARKING gc-state bit and (de)activate SATB queues in all threads.
// The second argument expects the queues to be in the opposite state already.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  set_gc_state_mask(MARKING, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1859
// Toggle traversal mode: sets/clears both TRAVERSAL and HAS_FORWARDED bits
// together, and (de)activates SATB queues in all threads.
void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1864
// Toggle the EVACUATION gc-state bit; only valid at a safepoint (asserted).
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
  set_gc_state_mask(EVACUATION, in_progress);
}
1869
// Given raw TLAB space at obj, install the Brooks forwarding pointer and
// return the address of the object proper (just past the Brooks pointer word).
HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Initialize Brooks pointer for the next object
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}
1876
// Extra words each object needs beyond its nominal size: the Brooks pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}
2211 } else {
2212 // Use ParallelGCThreads inside safepoints
2213 assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within safepoints");
2214 }
2215 } else {
2216 if (UseDynamicNumberOfGCThreads ||
2217 (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2218 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2219 } else {
2220 // Use ConcGCThreads outside safepoints
2221 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2222 }
2223 }
2224 }
2225 #endif
2226
// Connection matrix accessor; NULL when UseShenandoahMatrix is disabled.
ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
  return _connection_matrix;
}
2230
// Traversal GC accessor; NULL when the policy cannot do traversal GC.
ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
  return _traversal_gc;
}
2234
// Verifier accessor; only meaningful when -XX:+ShenandoahVerify is on.
ShenandoahVerifier* ShenandoahHeap::verifier() {
  guarantee(ShenandoahVerify, "Should be enabled");
  assert (_verifier != NULL, "sanity");
  return _verifier;
}
2240
2241 template<class T>
2242 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2243 private:
2244 T cl;
2245 ShenandoahHeap* _heap;
2246 ShenandoahRegionIterator _regions;
2247 bool _concurrent;
2248 public:
2249 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator regions, bool concurrent) :
2250 AbstractGangTask("Concurrent Update References Task"),
2523 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2524 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2525 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2526 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2527
2528 try_inject_alloc_failure();
2529 VM_ShenandoahInitUpdateRefs op;
2530 VMThread::execute(&op);
2531 }
2532
2533 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2534 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2535 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2536 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2537
2538 try_inject_alloc_failure();
2539 VM_ShenandoahFinalUpdateRefs op;
2540 VMThread::execute(&op);
2541 }
2542
2543 void ShenandoahHeap::vmop_entry_init_traversal() {
2544 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2545 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2546 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2547
2548 try_inject_alloc_failure();
2549 VM_ShenandoahInitTraversalGC op;
2550 VMThread::execute(&op);
2551 }
2552
2553 void ShenandoahHeap::vmop_entry_final_traversal() {
2554 TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2555 ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2556 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2557
2558 try_inject_alloc_failure();
2559 VM_ShenandoahFinalTraversalGC op;
2560 VMThread::execute(&op);
2561 }
2562
2631 EventMark em("%s", msg);
2632
2633 // No workers used in this phase, no setup required
2634
2635 op_init_updaterefs();
2636 }
2637
2638 void ShenandoahHeap::entry_final_updaterefs() {
2639 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2640 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2641
2642 static const char* msg = "Pause Final Update Refs";
2643 GCTraceTime(Info, gc) time(msg, gc_timer());
2644 EventMark em("%s", msg);
2645
2646 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());
2647
2648 op_final_updaterefs();
2649 }
2650
2651 void ShenandoahHeap::entry_init_traversal() {
2652 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2653 ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2654
2655 static const char* msg = "Pause Init Traversal";
2656 GCTraceTime(Info, gc) time(msg, gc_timer());
2657 EventMark em("%s", msg);
2658
2659 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2660
2661 op_init_traversal();
2662 }
2663
2664 void ShenandoahHeap::entry_final_traversal() {
2665 ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2666 ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2667
2668 static const char* msg = "Pause Final Traversal";
2669 GCTraceTime(Info, gc) time(msg, gc_timer());
2670 EventMark em("%s", msg);
2741 EventMark em("%s", msg);
2742
2743 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2744
2745 try_inject_alloc_failure();
2746 op_updaterefs();
2747 }
2748 void ShenandoahHeap::entry_cleanup() {
2749 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2750
2751 static const char* msg = "Concurrent cleanup";
2752 GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2753 EventMark em("%s", msg);
2754
2755 // This phase does not use workers, no need for setup
2756
2757 try_inject_alloc_failure();
2758 op_cleanup();
2759 }
2760
2761 void ShenandoahHeap::entry_cleanup_traversal() {
2762 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2763
2764 static const char* msg = "Concurrent cleanup";
2765 GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2766 EventMark em("%s", msg);
2767
2768 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2769
2770 try_inject_alloc_failure();
2771 op_cleanup_traversal();
2772 }
2773
2774 void ShenandoahHeap::entry_cleanup_bitmaps() {
2775 ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2776
2777 static const char* msg = "Concurrent cleanup";
2778 GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2779 EventMark em("%s", msg);
2780
2781 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());
2782
2783 try_inject_alloc_failure();
2784 op_cleanup_bitmaps();
2785 }
2786
2787 void ShenandoahHeap::entry_preclean() {
2788 if (ShenandoahPreclean && process_references()) {
2789 static const char* msg = "Concurrent precleaning";
2790 GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2791 EventMark em("%s", msg);
2792
2793 ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2794
2795 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());
2796
2797 try_inject_alloc_failure();
2798 op_preclean();
2799 }
2800 }
2801
2802 void ShenandoahHeap::entry_traversal() {
2803 static const char* msg = "Concurrent traversal";
2804 GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2805 EventMark em("%s", msg);
2806
2807 TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters()
2808 : monitoring_support()->concurrent_collection_counters());
2809
2810 ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2811
2812 try_inject_alloc_failure();
2813 op_traversal();
2814 }
2815
2816 void ShenandoahHeap::try_inject_alloc_failure() {
2817 if (ShenandoahAllocFailureALot && !cancelled_concgc() && ((os::random() % 1000) > 950)) {
2818 _inject_alloc_failure.set();
2819 os::naked_short_sleep(1);
2820 if (cancelled_concgc()) {
2821 log_info(gc)("Allocation failure was successfully injected");
2822 }
2823 }
2824 }
2825
// Consume the injected-failure flag: returns true at most once per injection
// (is_set() pre-check avoids the atomic try_unset() in the common case).
bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}
2866 _index(0),
2867 _heap(heap) {}
2868
// True while the iterator has not advanced past the last heap region.
bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}
2872
// Return a fresh iterator over all heap regions, starting at index 0.
ShenandoahRegionIterator ShenandoahHeap::region_iterator() const {
  return ShenandoahRegionIterator();
}
2876
2877 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2878 ShenandoahRegionIterator regions = region_iterator();
2879 ShenandoahHeapRegion* r = regions.next();
2880 while (r != NULL) {
2881 if (cl.heap_region_do(r)) {
2882 break;
2883 }
2884 r = regions.next();
2885 }
2886 }
2887
// True when the current cycle mode is MINOR.
bool ShenandoahHeap::is_minor_gc() const {
  return _gc_cycle_mode.get() == MINOR;
}
2891
// True when the current cycle mode is MAJOR.
bool ShenandoahHeap::is_major_gc() const {
  return _gc_cycle_mode.get() == MAJOR;
}
2895
// Record the mode (minor/major) of the GC cycle in flight.
void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
  _gc_cycle_mode.set(gc_cycle_mode);
}
2899
// Raw global gc-state byte (the same value published to thread-locals by
// set_gc_state_all_threads()).
char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}
|