27 #include "gc/shared/gcTimer.hpp"
28 #include "gc/shared/gcTraceTime.inline.hpp"
29 #include "gc/shared/memAllocator.hpp"
30 #include "gc/shared/parallelCleaning.hpp"
31 #include "gc/shared/plab.hpp"
32
33 #include "gc/shenandoah/brooksPointer.hpp"
34 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
35 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
37 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
38 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
39 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
40 #include "gc/shenandoah/shenandoahControlThread.hpp"
41 #include "gc/shenandoah/shenandoahFreeSet.hpp"
42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
44 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
45 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
46 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
47 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
48 #include "gc/shenandoah/shenandoahMetrics.hpp"
49 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
50 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
51 #include "gc/shenandoah/shenandoahPacer.hpp"
52 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
53 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
54 #include "gc/shenandoah/shenandoahStringDedup.hpp"
55 #include "gc/shenandoah/shenandoahUtils.hpp"
56 #include "gc/shenandoah/shenandoahVerifier.hpp"
57 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
58 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
59 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
60 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
61 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
62 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
63 #include "gc/shenandoah/heuristics/shenandoahPartialConnectedHeuristics.hpp"
64 #include "gc/shenandoah/heuristics/shenandoahPartialGenerationalHeuristics.hpp"
65 #include "gc/shenandoah/heuristics/shenandoahPartialLRUHeuristics.hpp"
66 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
160 ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
161
162 _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
163 size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
164 _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
165 _committed = _initial_size;
166
167 log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
168 if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
169 vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
170 }
171
172 size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
173 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
174
175 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
176 _free_set = new ShenandoahFreeSet(this, _num_regions);
177
178 _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
179
180 _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
181 _next_top_at_mark_starts = _next_top_at_mark_starts_base -
182 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
183
184 _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
185 _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
186 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
187
188 if (ShenandoahPacing) {
189 _pacer = new ShenandoahPacer(this);
190 _pacer->setup_for_idle();
191 } else {
192 _pacer = NULL;
193 }
194
195 {
196 ShenandoahHeapLocker locker(lock());
197 for (size_t i = 0; i < _num_regions; i++) {
198 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
199 (HeapWord*) pgc_rs.base() + reg_size_words * i,
200 reg_size_words,
201 i,
202 i < num_committed_regions);
203
204 _complete_top_at_mark_starts_base[i] = r->bottom();
205 _next_top_at_mark_starts_base[i] = r->bottom();
206 _regions[i] = r;
207 assert(!collection_set()->is_in(i), "New region should not be in collection set");
208 }
209
210 _free_set->rebuild();
211 }
212
213 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
214 "misaligned heap: "PTR_FORMAT, p2i(base()));
215
216 // The call below uses stuff (the SATB* things) that are in G1, but probably
217 // belong into a shared location.
218 ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
219 SATB_Q_FL_lock,
220 20 /*G1SATBProcessCompletedThreshold */,
221 Shared_SATB_Q_lock);
222
223 // Reserve space for prev and next bitmap.
224 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
225 _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
226 _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
227 _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
228
229 size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
230
231 guarantee(bitmap_bytes_per_region != 0,
232 "Bitmap bytes per region should not be zero");
260 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
261 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
262 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
263 os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
264 "couldn't allocate initial bitmap");
265 os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
266 "couldn't allocate initial bitmap");
267
268 size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
269
270 if (ShenandoahVerify) {
271 ReservedSpace verify_bitmap(_bitmap_size, page_size);
272 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
273 "couldn't allocate verification bitmap");
274 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
275 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
276 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
277 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
278 }
279
280 if (ShenandoahAlwaysPreTouch) {
281 assert (!AlwaysPreTouch, "Should have been overridden");
282
283 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
284 // before initialize() below zeroes it with initializing thread. For any given region,
285 // we touch the region and the corresponding bitmaps from the same thread.
286 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
287
288 log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
289 _num_regions, page_size);
290 ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
291 _workers->run_task(&cl);
292 }
293
294 _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
295 _complete_mark_bit_map = &_mark_bit_map0;
296
297 _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
298 _next_mark_bit_map = &_mark_bit_map1;
299
300 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
301 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
302 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
303 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
304 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
305
306 if (UseShenandoahMatrix) {
307 _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
308 } else {
309 _connection_matrix = NULL;
310 }
311
312 _traversal_gc = heuristics()->can_do_traversal_gc() ?
313 new ShenandoahTraversalGC(this, _num_regions) :
314 NULL;
315
316 _monitoring_support = new ShenandoahMonitoringSupport(this);
317
318 _phase_timings = new ShenandoahPhaseTimings();
390 }
391
392 }
393
394 #ifdef _MSC_VER
395 #pragma warning( push )
396 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
397 #endif
398
399 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
400 CollectedHeap(),
401 _shenandoah_policy(policy),
402 _soft_ref_policy(),
403 _regions(NULL),
404 _free_set(NULL),
405 _collection_set(NULL),
406 _update_refs_iterator(this),
407 _bytes_allocated_since_gc_start(0),
408 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
409 _ref_processor(NULL),
410 _next_top_at_mark_starts(NULL),
411 _next_top_at_mark_starts_base(NULL),
412 _complete_top_at_mark_starts(NULL),
413 _complete_top_at_mark_starts_base(NULL),
414 _mark_bit_map0(),
415 _mark_bit_map1(),
416 _aux_bit_map(),
417 _connection_matrix(NULL),
418 _verifier(NULL),
419 _pacer(NULL),
420 _used_at_last_gc(0),
421 _alloc_seq_at_last_gc_start(0),
422 _alloc_seq_at_last_gc_end(0),
423 _safepoint_workers(NULL),
424 _gc_cycle_mode(),
425 #ifdef ASSERT
426 _heap_expansion_count(0),
427 #endif
428 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
429 _phase_timings(NULL),
430 _alloc_tracker(NULL),
431 _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
432 _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
433 _memory_pool(NULL)
434 {
435 log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
456 false, false);
457 _safepoint_workers->initialize_workers();
458 }
459 }
460
461 #ifdef _MSC_VER
462 #pragma warning( pop )
463 #endif
464
465 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
466 private:
467 ShenandoahRegionIterator _regions;
468
469 public:
470 ShenandoahResetNextBitmapTask() :
471 AbstractGangTask("Parallel Reset Bitmap Task") {}
472
473 void work(uint worker_id) {
474 ShenandoahHeapRegion* region = _regions.next();
475 ShenandoahHeap* heap = ShenandoahHeap::heap();
476 while (region != NULL) {
477 if (heap->is_bitmap_slice_committed(region)) {
478 HeapWord* bottom = region->bottom();
479 HeapWord* top = heap->next_top_at_mark_start(region->bottom());
480 if (top > bottom) {
481 heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
482 }
483 assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
484 }
485 region = _regions.next();
486 }
487 }
488 };
489
// Clears the "next" marking bitmap across the heap using all active GC workers.
void ShenandoahHeap::reset_next_mark_bitmap() {
  // Sanity-check the currently configured worker count before fanning out.
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTask task;
  _workers->run_task(&task);
}
496
497 class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
498 private:
499 ShenandoahHeapRegionSetIterator& _regions;
500
501 public:
502 ShenandoahResetNextBitmapTraversalTask(ShenandoahHeapRegionSetIterator& regions) :
503 AbstractGangTask("Parallel Reset Bitmap Task for Traversal"),
504 _regions(regions) {}
505
506 void work(uint worker_id) {
507 ShenandoahHeap* heap = ShenandoahHeap::heap();
508 ShenandoahHeapRegion* region = _regions.claim_next();
509 while (region != NULL) {
510 if (!region->is_trash()) {
511 assert(!region->is_empty_uncommitted(), "sanity");
512 assert(heap->is_bitmap_slice_committed(region), "sanity");
513 HeapWord* bottom = region->bottom();
514 HeapWord* top = heap->next_top_at_mark_start(bottom);
515 assert(top <= region->top(),
516 "TAMS must smaller/equals than top: TAMS: "PTR_FORMAT", top: "PTR_FORMAT,
517 p2i(top), p2i(region->top()));
518 if (top > bottom) {
519 heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
520 heap->set_complete_top_at_mark_start(bottom, top);
521 heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
522 heap->set_next_top_at_mark_start(bottom, bottom);
523 }
524 assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
525 "need clear next bitmap");
526 }
527 region = _regions.claim_next();
528 }
529 }
530 };
531
// Post-traversal variant of bitmap reset: only regions in the traversal set
// carry marks that need to be folded into the complete bitmap and cleared.
void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
  assert_gc_workers(_workers->active_workers());

  ShenandoahHeapRegionSet* regions = traversal_gc()->traversal_set();
  ShenandoahHeapRegionSetIterator iter(regions);
  ShenandoahResetNextBitmapTraversalTask task(iter);
  _workers->run_task(&task);
}
540
541 bool ShenandoahHeap::is_next_bitmap_clear() {
542 for (size_t idx = 0; idx < _num_regions; idx++) {
543 ShenandoahHeapRegion* r = get_region(idx);
544 if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
545 return false;
546 }
547 }
548 return true;
549 }
550
// Returns true iff the "next" marking bitmap has no set bits in [start, end).
bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
554
// Returns true iff the "complete" marking bitmap has no set bits in [start, end).
bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
558
559 void ShenandoahHeap::print_on(outputStream* st) const {
560 st->print_cr("Shenandoah Heap");
561 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
562 capacity() / K, committed() / K, used() / K);
563 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
564 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
565
566 st->print("Status: ");
567 if (has_forwarded_objects()) st->print("has forwarded objects, ");
568 if (is_concurrent_mark_in_progress()) st->print("marking, ");
569 if (is_evacuation_in_progress()) st->print("evacuating, ");
570 if (is_update_refs_in_progress()) st->print("updating refs, ");
571 if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
572 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
573 if (is_full_gc_in_progress()) st->print("full gc, ");
574 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
575
576 if (cancelled_gc()) {
577 st->print("cancelled");
578 } else {
1630 if (skip_humongous_continuation && current->is_humongous_continuation()) {
1631 continue;
1632 }
1633 if (skip_cset_regions && in_collection_set(current)) {
1634 continue;
1635 }
1636 if (blk->heap_region_do(current)) {
1637 return;
1638 }
1639 }
1640 }
1641
1642 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1643 private:
1644 ShenandoahHeap* sh;
1645 public:
1646 ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1647
1648 bool heap_region_do(ShenandoahHeapRegion* r) {
1649 r->clear_live_data();
1650 sh->set_next_top_at_mark_start(r->bottom(), r->top());
1651 return false;
1652 }
1653 };
1654
1655 void ShenandoahHeap::op_init_mark() {
1656 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1657
1658 assert(is_next_bitmap_clear(), "need clear marking bitmap");
1659
1660 if (ShenandoahVerify) {
1661 verifier()->verify_before_concmark();
1662 }
1663
1664 {
1665 ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1666 accumulate_statistics_all_tlabs();
1667 }
1668
1669 set_concurrent_mark_in_progress(true);
1670 // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1671 {
1672 ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1673 make_parsable(true);
1674 }
1675
1676 {
1677 ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1678 ShenandoahClearLivenessClosure clc(this);
1704 void ShenandoahHeap::op_final_mark() {
1705 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1706
1707 // It is critical that we
1708 // evacuate roots right after finishing marking, so that we don't
1709 // get unmarked objects in the roots.
1710
1711 if (!cancelled_gc()) {
1712 concurrentMark()->finish_mark_from_roots();
1713 stop_concurrent_marking();
1714
1715 {
1716 ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1717
1718 // All allocations past TAMS are implicitly live, adjust the region data.
1719 // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1720 for (size_t i = 0; i < num_regions(); i++) {
1721 ShenandoahHeapRegion* r = get_region(i);
1722 if (!r->is_active()) continue;
1723
1724 HeapWord* tams = complete_top_at_mark_start(r->bottom());
1725 HeapWord* top = r->top();
1726 if (top > tams) {
1727 r->increase_live_data_alloc_words(pointer_delta(top, tams));
1728 }
1729 }
1730 }
1731
1732 {
1733 ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1734 prepare_for_concurrent_evacuation();
1735 }
1736
1737 // If collection set has candidates, start evacuation.
1738 // Otherwise, bypass the rest of the cycle.
1739 if (!collection_set()->is_empty()) {
1740 set_evacuation_in_progress(true);
1741 // From here on, we need to update references.
1742 set_has_forwarded_objects(true);
1743
1744 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
2005 if (!metrics.is_good_progress("Degenerated GC")) {
2006 _progress_last_gc.unset();
2007 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
2008 op_degenerated_futile();
2009 } else {
2010 _progress_last_gc.set();
2011 }
2012 }
2013
// Degenerated GC could not complete; record the escalation and run a Full GC.
void ShenandoahHeap::op_degenerated_fail() {
  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
  shenandoahPolicy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}
2019
// Degenerated GC completed but made no useful progress; escalate to Full GC.
void ShenandoahHeap::op_degenerated_futile() {
  shenandoahPolicy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}
2024
2025 void ShenandoahHeap::swap_mark_bitmaps() {
2026 // Swap bitmaps.
2027 MarkBitMap* tmp1 = _complete_mark_bit_map;
2028 _complete_mark_bit_map = _next_mark_bit_map;
2029 _next_mark_bit_map = tmp1;
2030
2031 // Swap top-at-mark-start pointers
2032 HeapWord** tmp2 = _complete_top_at_mark_starts;
2033 _complete_top_at_mark_starts = _next_top_at_mark_starts;
2034 _next_top_at_mark_starts = tmp2;
2035
2036 HeapWord** tmp3 = _complete_top_at_mark_starts_base;
2037 _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
2038 _next_top_at_mark_starts_base = tmp3;
2039 }
2040
2041
// Finishes (or abandons) the concurrent marking phase. On normal completion
// the just-finished bitmap becomes the "complete" one; on cancellation the
// marking results are discarded and the bitmaps are left as-is.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(is_concurrent_mark_in_progress(), "How else could we get here?");
  if (!cancelled_gc()) {
    // If we needed to update refs, and concurrent marking has been cancelled,
    // we need to finish updating references.
    set_has_forwarded_objects(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  // Trace-level region dump at the end of marking, guarded so the
  // ResourceMark/LogStream cost is only paid when the target is enabled.
  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}
2060
2061 void ShenandoahHeap::force_satb_flush_all_threads() {
2062 if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
2063 // No need to flush SATBs
2064 return;
2065 }
2066
2067 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2068 ShenandoahThreadLocalData::set_force_satb_flush(t, true);
2106 return result;
2107 }
2108
// Number of extra words each object carries for the Brooks forwarding pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}
2112
// NOTE(review): uses heap_no_check(), presumably so the closure can be
// constructed before the heap singleton is fully set up — confirm.
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
2116
// NOTE(review): uses heap_no_check(), presumably so the closure can be
// constructed before the heap singleton is fully set up — confirm.
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
2120
// Liveness check for phases where forwarded objects may exist: resolve the
// (possibly forwarded) oop first, then consult the "next" marking bitmap.
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  if (CompressedOops::is_null(obj)) {
    return false;
  }
  obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  // During concurrent mark/traversal the resolved oop must not itself be forwarded.
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress())
  return _heap->is_marked_next(obj);
}
2129
2130 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
2131 if (CompressedOops::is_null(obj)) {
2132 return false;
2133 }
2134 shenandoah_assert_not_forwarded(NULL, obj);
2135 return _heap->is_marked_next(obj);
2136 }
2137
2138 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
2139 return has_forwarded_objects() ?
2140 (BoolObjectClosure*) &_forwarded_is_alive :
2141 (BoolObjectClosure*) &_is_alive;
2142 }
2143
2144 void ShenandoahHeap::ref_processing_init() {
2145 MemRegion mr = reserved_region();
2146
2147 _forwarded_is_alive.init(this);
2148 _is_alive.init(this);
2149 assert(_max_workers > 0, "Sanity");
2150
2151 _ref_processor =
2152 new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery
2153 ParallelRefProcEnabled, // MT processing
2154 _max_workers, // Degree of MT processing
2155 true, // MT discovery
2322 return _unload_classes.is_set();
2323 }
2324
2325 //fixme this should be in heapregionset
2326 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2327 size_t region_idx = r->region_number() + 1;
2328 ShenandoahHeapRegion* next = get_region(region_idx);
2329 guarantee(next->region_number() == region_idx, "region number must match");
2330 while (next->is_humongous()) {
2331 region_idx = next->region_number() + 1;
2332 next = get_region(region_idx);
2333 guarantee(next->region_number() == region_idx, "region number must match");
2334 }
2335 return next;
2336 }
2337
// Accessor for the monitoring support created during heap initialization.
ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}
2341
// Accessor for the "complete" (last finished) marking bitmap.
MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
  return _complete_mark_bit_map;
}
2345
// Accessor for the "next" (in-progress / upcoming) marking bitmap.
MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}
2349
// Address of the collection set's biased membership map; exported as a raw
// address, presumably for use by generated barrier code — confirm callers.
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}
2355
// Raw address of the cancelled-GC flag, for code that polls it directly.
address ShenandoahHeap::cancelled_gc_addr() {
  return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
}
2359
// Raw address of the gc_state bitmask, for code that polls it directly.
address ShenandoahHeap::gc_state_addr() {
  return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
}
2363
// Reads the allocation counter with acquire semantics, pairing with the
// release store in reset_bytes_allocated_since_gc_start().
size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
}
2367
// Zeroes the allocation counter with a release-store + fence so concurrent
// readers observe the reset.
void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
}
2371
// Accessor for the pacer; only valid when ShenandoahPacing is enabled,
// since _pacer is left NULL otherwise (see heap initialization).
ShenandoahPacer* ShenandoahHeap::pacer() const {
  assert (_pacer != NULL, "sanity");
  return _pacer;
}
2376
// Records the "next" TAMS for the region starting at region_base. The TAMS
// array is biased by the heap base, so shifting the region base address by
// the region size yields the array index directly.
void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _next_top_at_mark_starts[index] = addr;
}
2381
// Returns the "next" TAMS for the region starting at region_base, using the
// same biased-array indexing as set_next_top_at_mark_start().
HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _next_top_at_mark_starts[index];
}
2386
// Records the "complete" TAMS for the region starting at region_base,
// using the heap-base-biased array indexing.
void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _complete_top_at_mark_starts[index] = addr;
}
2391
// Returns the "complete" TAMS for the region starting at region_base,
// using the heap-base-biased array indexing.
HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _complete_top_at_mark_starts[index];
}
2396
// Toggles the degenerated-GC phase flag.
void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}
2400
// Toggles the Full-GC phase flag.
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}
2404
// Toggles the Full-GC move sub-phase flag; only valid inside a Full GC.
void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}
2409
// Update-refs state lives in the shared gc_state bitmask, unlike the
// stand-alone flags above.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state_mask(UPDATEREFS, in_progress);
}
2413
// Registers a compiled method with Shenandoah's code-roots bookkeeping.
void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::add_nmethod(nm);
}
2477 template<class T>
2478 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2479 private:
2480 T cl;
2481 ShenandoahHeap* _heap;
2482 ShenandoahRegionIterator* _regions;
2483 bool _concurrent;
2484 public:
2485 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2486 AbstractGangTask("Concurrent Update References Task"),
2487 cl(T()),
2488 _heap(ShenandoahHeap::heap()),
2489 _regions(regions),
2490 _concurrent(concurrent) {
2491 }
2492
2493 void work(uint worker_id) {
2494 ShenandoahWorkerSession worker_session(worker_id);
2495 SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2496 ShenandoahHeapRegion* r = _regions->next();
2497 while (r != NULL) {
2498 if (_heap->in_collection_set(r)) {
2499 HeapWord* bottom = r->bottom();
2500 HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2501 if (top > bottom) {
2502 _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2503 }
2504 } else {
2505 if (r->is_active()) {
2506 _heap->marked_object_oop_safe_iterate(r, &cl);
2507 }
2508 }
2509 if (ShenandoahPacing) {
2510 HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2511 assert (top_at_start_ur >= r->bottom(), "sanity");
2512 _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2513 }
2514 if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2515 return;
2516 }
2517 r = _regions->next();
2518 }
2519 }
2520 };
2521
2522 void ShenandoahHeap::update_heap_references(bool concurrent) {
|
27 #include "gc/shared/gcTimer.hpp"
28 #include "gc/shared/gcTraceTime.inline.hpp"
29 #include "gc/shared/memAllocator.hpp"
30 #include "gc/shared/parallelCleaning.hpp"
31 #include "gc/shared/plab.hpp"
32
33 #include "gc/shenandoah/brooksPointer.hpp"
34 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
35 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
37 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
38 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
39 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
40 #include "gc/shenandoah/shenandoahControlThread.hpp"
41 #include "gc/shenandoah/shenandoahFreeSet.hpp"
42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
44 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
45 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
46 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
47 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
48 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
49 #include "gc/shenandoah/shenandoahMetrics.hpp"
50 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
51 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
52 #include "gc/shenandoah/shenandoahPacer.hpp"
53 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
54 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
55 #include "gc/shenandoah/shenandoahStringDedup.hpp"
56 #include "gc/shenandoah/shenandoahUtils.hpp"
57 #include "gc/shenandoah/shenandoahVerifier.hpp"
58 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
59 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
60 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
61 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
62 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
63 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
64 #include "gc/shenandoah/heuristics/shenandoahPartialConnectedHeuristics.hpp"
65 #include "gc/shenandoah/heuristics/shenandoahPartialGenerationalHeuristics.hpp"
66 #include "gc/shenandoah/heuristics/shenandoahPartialLRUHeuristics.hpp"
67 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
161 ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
162
163 _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
164 size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
165 _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
166 _committed = _initial_size;
167
168 log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
169 if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
170 vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
171 }
172
173 size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
174 size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
175
176 _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
177 _free_set = new ShenandoahFreeSet(this, _num_regions);
178
179 _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
180
181 if (ShenandoahPacing) {
182 _pacer = new ShenandoahPacer(this);
183 _pacer->setup_for_idle();
184 } else {
185 _pacer = NULL;
186 }
187
188 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
189 "misaligned heap: "PTR_FORMAT, p2i(base()));
190
191 // The call below uses stuff (the SATB* things) that are in G1, but probably
192 // belong into a shared location.
193 ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
194 SATB_Q_FL_lock,
195 20 /*G1SATBProcessCompletedThreshold */,
196 Shared_SATB_Q_lock);
197
198 // Reserve space for prev and next bitmap.
199 size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
200 _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
201 _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
202 _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
203
204 size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
205
206 guarantee(bitmap_bytes_per_region != 0,
207 "Bitmap bytes per region should not be zero");
235 size_t bitmap_init_commit = _bitmap_bytes_per_slice *
236 align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
237 bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
238 os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
239 "couldn't allocate initial bitmap");
240 os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
241 "couldn't allocate initial bitmap");
242
243 size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
244
245 if (ShenandoahVerify) {
246 ReservedSpace verify_bitmap(_bitmap_size, page_size);
247 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
248 "couldn't allocate verification bitmap");
249 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
250 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
251 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
252 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
253 }
254
255 _complete_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap0_region, _num_regions);
256 _next_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap1_region, _num_regions);
257
258 {
259 ShenandoahHeapLocker locker(lock());
260 for (size_t i = 0; i < _num_regions; i++) {
261 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
262 (HeapWord*) pgc_rs.base() + reg_size_words * i,
263 reg_size_words,
264 i,
265 i < num_committed_regions);
266
267 _complete_marking_context->set_top_at_mark_start(i, r->bottom());
268 _next_marking_context->set_top_at_mark_start(i, r->bottom());
269 _regions[i] = r;
270 assert(!collection_set()->is_in(i), "New region should not be in collection set");
271 }
272
273 _free_set->rebuild();
274 }
275
276 if (ShenandoahAlwaysPreTouch) {
277 assert (!AlwaysPreTouch, "Should have been overridden");
278
279 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
280 // before initialize() below zeroes it with initializing thread. For any given region,
281 // we touch the region and the corresponding bitmaps from the same thread.
282 ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
283
284 log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
285 _num_regions, page_size);
286 ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
287 _workers->run_task(&cl);
288 }
289
290
291 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
292 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
293 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
294 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
295 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
296
297 if (UseShenandoahMatrix) {
298 _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
299 } else {
300 _connection_matrix = NULL;
301 }
302
303 _traversal_gc = heuristics()->can_do_traversal_gc() ?
304 new ShenandoahTraversalGC(this, _num_regions) :
305 NULL;
306
307 _monitoring_support = new ShenandoahMonitoringSupport(this);
308
309 _phase_timings = new ShenandoahPhaseTimings();
381 }
382
383 }
384
385 #ifdef _MSC_VER
386 #pragma warning( push )
387 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
388 #endif
389
390 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
391 CollectedHeap(),
392 _shenandoah_policy(policy),
393 _soft_ref_policy(),
394 _regions(NULL),
395 _free_set(NULL),
396 _collection_set(NULL),
397 _update_refs_iterator(this),
398 _bytes_allocated_since_gc_start(0),
399 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
400 _ref_processor(NULL),
401 _complete_marking_context(NULL),
402 _next_marking_context(NULL),
403 _aux_bit_map(),
404 _connection_matrix(NULL),
405 _verifier(NULL),
406 _pacer(NULL),
407 _used_at_last_gc(0),
408 _alloc_seq_at_last_gc_start(0),
409 _alloc_seq_at_last_gc_end(0),
410 _safepoint_workers(NULL),
411 _gc_cycle_mode(),
412 #ifdef ASSERT
413 _heap_expansion_count(0),
414 #endif
415 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
416 _phase_timings(NULL),
417 _alloc_tracker(NULL),
418 _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
419 _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
420 _memory_pool(NULL)
421 {
422 log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
443 false, false);
444 _safepoint_workers->initialize_workers();
445 }
446 }
447
448 #ifdef _MSC_VER
449 #pragma warning( pop )
450 #endif
451
452 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
453 private:
454 ShenandoahRegionIterator _regions;
455
456 public:
457 ShenandoahResetNextBitmapTask() :
458 AbstractGangTask("Parallel Reset Bitmap Task") {}
459
460 void work(uint worker_id) {
461 ShenandoahHeapRegion* region = _regions.next();
462 ShenandoahHeap* heap = ShenandoahHeap::heap();
463 ShenandoahMarkingContext* const ctx = heap->next_marking_context();
464 while (region != NULL) {
465 if (heap->is_bitmap_slice_committed(region)) {
466 HeapWord* bottom = region->bottom();
467 HeapWord* top = ctx->top_at_mark_start(region->region_number());
468 if (top > bottom) {
469 ctx->clear_bitmap(bottom, top);
470 }
471 assert(ctx->is_bitmap_clear_range(bottom, region->end()), "must be clear");
472 }
473 region = _regions.next();
474 }
475 }
476 };
477
478 void ShenandoahHeap::reset_next_mark_bitmap() {
479 assert_gc_workers(_workers->active_workers());
480
481 ShenandoahResetNextBitmapTask task;
482 _workers->run_task(&task);
483 }
484
485 class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
486 private:
487 ShenandoahHeapRegionSetIterator& _regions;
488
489 public:
490 ShenandoahResetNextBitmapTraversalTask(ShenandoahHeapRegionSetIterator& regions) :
491 AbstractGangTask("Parallel Reset Bitmap Task for Traversal"),
492 _regions(regions) {}
493
494 void work(uint worker_id) {
495 ShenandoahHeap* heap = ShenandoahHeap::heap();
496 ShenandoahHeapRegion* region = _regions.claim_next();
497 ShenandoahMarkingContext* const next_ctx = heap->next_marking_context();
498 ShenandoahMarkingContext* const compl_ctx = heap->next_marking_context();
499 while (region != NULL) {
500 if (!region->is_trash()) {
501 assert(!region->is_empty_uncommitted(), "sanity");
502 assert(heap->is_bitmap_slice_committed(region), "sanity");
503 HeapWord* bottom = region->bottom();
504 HeapWord* top = next_ctx->top_at_mark_start(region->region_number());
505 assert(top <= region->top(),
506 "TAMS must smaller/equals than top: TAMS: "PTR_FORMAT", top: "PTR_FORMAT,
507 p2i(top), p2i(region->top()));
508 if (top > bottom) {
509 compl_ctx->mark_bit_map()->copy_from(heap->next_marking_context()->mark_bit_map(), MemRegion(bottom, top));
510 compl_ctx->set_top_at_mark_start(region->region_number(), top);
511 next_ctx->clear_bitmap(bottom, top);
512 next_ctx->set_top_at_mark_start(region->region_number(), bottom);
513 }
514 assert(next_ctx->is_bitmap_clear_range(region->bottom(), region->end()),
515 "need clear next bitmap");
516 }
517 region = _regions.claim_next();
518 }
519 }
520 };
521
522 void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
523 assert_gc_workers(_workers->active_workers());
524
525 ShenandoahHeapRegionSet* regions = traversal_gc()->traversal_set();
526 ShenandoahHeapRegionSetIterator iter(regions);
527 ShenandoahResetNextBitmapTraversalTask task(iter);
528 _workers->run_task(&task);
529 }
530
531 void ShenandoahHeap::print_on(outputStream* st) const {
532 st->print_cr("Shenandoah Heap");
533 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
534 capacity() / K, committed() / K, used() / K);
535 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
536 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
537
538 st->print("Status: ");
539 if (has_forwarded_objects()) st->print("has forwarded objects, ");
540 if (is_concurrent_mark_in_progress()) st->print("marking, ");
541 if (is_evacuation_in_progress()) st->print("evacuating, ");
542 if (is_update_refs_in_progress()) st->print("updating refs, ");
543 if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
544 if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
545 if (is_full_gc_in_progress()) st->print("full gc, ");
546 if (is_full_gc_move_in_progress()) st->print("full gc move, ");
547
548 if (cancelled_gc()) {
549 st->print("cancelled");
550 } else {
1602 if (skip_humongous_continuation && current->is_humongous_continuation()) {
1603 continue;
1604 }
1605 if (skip_cset_regions && in_collection_set(current)) {
1606 continue;
1607 }
1608 if (blk->heap_region_do(current)) {
1609 return;
1610 }
1611 }
1612 }
1613
1614 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1615 private:
1616 ShenandoahHeap* sh;
1617 public:
1618 ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1619
1620 bool heap_region_do(ShenandoahHeapRegion* r) {
1621 r->clear_live_data();
1622 sh->next_marking_context()->set_top_at_mark_start(r->region_number(), r->top());
1623 return false;
1624 }
1625 };
1626
1627 void ShenandoahHeap::op_init_mark() {
1628 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1629
1630 assert(next_marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1631
1632 if (ShenandoahVerify) {
1633 verifier()->verify_before_concmark();
1634 }
1635
1636 {
1637 ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1638 accumulate_statistics_all_tlabs();
1639 }
1640
1641 set_concurrent_mark_in_progress(true);
1642 // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1643 {
1644 ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1645 make_parsable(true);
1646 }
1647
1648 {
1649 ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1650 ShenandoahClearLivenessClosure clc(this);
1676 void ShenandoahHeap::op_final_mark() {
1677 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1678
1679 // It is critical that we
1680 // evacuate roots right after finishing marking, so that we don't
1681 // get unmarked objects in the roots.
1682
1683 if (!cancelled_gc()) {
1684 concurrentMark()->finish_mark_from_roots();
1685 stop_concurrent_marking();
1686
1687 {
1688 ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1689
1690 // All allocations past TAMS are implicitly live, adjust the region data.
1691 // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1692 for (size_t i = 0; i < num_regions(); i++) {
1693 ShenandoahHeapRegion* r = get_region(i);
1694 if (!r->is_active()) continue;
1695
1696 HeapWord* tams = complete_marking_context()->top_at_mark_start(r->region_number());
1697 HeapWord* top = r->top();
1698 if (top > tams) {
1699 r->increase_live_data_alloc_words(pointer_delta(top, tams));
1700 }
1701 }
1702 }
1703
1704 {
1705 ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1706 prepare_for_concurrent_evacuation();
1707 }
1708
1709 // If collection set has candidates, start evacuation.
1710 // Otherwise, bypass the rest of the cycle.
1711 if (!collection_set()->is_empty()) {
1712 set_evacuation_in_progress(true);
1713 // From here on, we need to update references.
1714 set_has_forwarded_objects(true);
1715
1716 ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1977 if (!metrics.is_good_progress("Degenerated GC")) {
1978 _progress_last_gc.unset();
1979 cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1980 op_degenerated_futile();
1981 } else {
1982 _progress_last_gc.set();
1983 }
1984 }
1985
// Degenerated GC could not complete its work: record the upgrade in policy
// statistics and fall back to a Full GC.
void ShenandoahHeap::op_degenerated_fail() {
  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
  shenandoahPolicy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}
1991
// Degenerated GC finished but made no useful progress: record the upgrade
// and run a Full GC instead.
void ShenandoahHeap::op_degenerated_futile() {
  shenandoahPolicy()->record_degenerated_upgrade_to_full();
  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
}
1996
1997 void ShenandoahHeap::swap_mark_contexts() {
1998 ShenandoahMarkingContext* tmp = _complete_marking_context;
1999 _complete_marking_context = _next_marking_context;
2000 _next_marking_context = tmp;
2001 }
2002
2003
// Ends the concurrent marking phase. On a clean (non-cancelled) finish, the
// just-built next context is promoted to complete via swap, and the
// "has forwarded objects" state is dropped.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(is_concurrent_mark_in_progress(), "How else could we get here?");
  if (!cancelled_gc()) {
    // Marking finished normally: swap contexts and clear the forwarded-objects
    // state.
    // NOTE(review): the previous comment here described the *cancelled* case
    // ("concurrent marking has been cancelled"), but this branch only runs
    // when the GC was NOT cancelled — confirm the intended semantics.
    set_has_forwarded_objects(false);
    swap_mark_contexts();
  }
  set_concurrent_mark_in_progress(false);

  // Optional trace-level dump of all region states at mark end.
  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}
2022
2023 void ShenandoahHeap::force_satb_flush_all_threads() {
2024 if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
2025 // No need to flush SATBs
2026 return;
2027 }
2028
2029 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2030 ShenandoahThreadLocalData::set_force_satb_flush(t, true);
2068 return result;
2069 }
2070
// Number of extra heap words each object carries for the Brooks forwarding
// pointer that precedes it.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}
2074
// Caches the heap pointer at construction. Uses heap_no_check() — presumably
// to permit construction without the initialized-heap assertion; confirm.
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
2078
// Caches the heap pointer at construction (see note on the forwarded variant
// regarding heap_no_check()).
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
2082
// Liveness predicate for phases where objects may still be forwarded:
// resolve through the forwarding pointer first, then consult the next-mark
// bitmap. NULL is never alive.
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  if (CompressedOops::is_null(obj)) {
    return false;
  }
  obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  // Once resolved, the object must not be forwarded again while marking or
  // traversal is in progress.
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
  return _heap->next_marking_context()->is_marked(obj);
}
2091
// Liveness predicate for phases with no forwarded objects: the argument is
// asserted to be unforwarded, and is alive iff marked in the next context.
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  if (CompressedOops::is_null(obj)) {
    return false;
  }
  shenandoah_assert_not_forwarded(NULL, obj);
  return _heap->next_marking_context()->is_marked(obj);
}
2099
2100 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
2101 return has_forwarded_objects() ?
2102 (BoolObjectClosure*) &_forwarded_is_alive :
2103 (BoolObjectClosure*) &_is_alive;
2104 }
2105
2106 void ShenandoahHeap::ref_processing_init() {
2107 MemRegion mr = reserved_region();
2108
2109 _forwarded_is_alive.init(this);
2110 _is_alive.init(this);
2111 assert(_max_workers > 0, "Sanity");
2112
2113 _ref_processor =
2114 new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery
2115 ParallelRefProcEnabled, // MT processing
2116 _max_workers, // Degree of MT processing
2117 true, // MT discovery
2284 return _unload_classes.is_set();
2285 }
2286
// FIXME: this region-walking helper should live in ShenandoahHeapRegionSet.
2288 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2289 size_t region_idx = r->region_number() + 1;
2290 ShenandoahHeapRegion* next = get_region(region_idx);
2291 guarantee(next->region_number() == region_idx, "region number must match");
2292 while (next->is_humongous()) {
2293 region_idx = next->region_number() + 1;
2294 next = get_region(region_idx);
2295 guarantee(next->region_number() == region_idx, "region number must match");
2296 }
2297 return next;
2298 }
2299
// Accessor for the monitoring support object created during heap initialization.
ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}
2303
// Raw address of the biased collection-set membership map — presumably
// exported so generated barrier code can test cset membership directly;
// confirm against the barrier/compiler users.
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}
2309
// Raw address of the GC-cancellation flag — presumably polled from generated
// code; confirm against users.
address ShenandoahHeap::cancelled_gc_addr() {
  return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
}
2313
// Raw address of the global gc-state byte — presumably read by generated
// barrier code; confirm against users.
address ShenandoahHeap::gc_state_addr() {
  return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
}
2317
// Bytes allocated since the current GC cycle started. Acquire-load pairs
// with the release store in reset_bytes_allocated_since_gc_start().
size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
  return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
}
2321
// Zeroes the allocation counter at GC start, with a release-store fence so
// concurrent acquire-loads observe the reset.
void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
  OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
}
2325
// Accessor for the pacer; must only be called after it has been created
// (ShenandoahPacing), hence the sanity assert.
ShenandoahPacer* ShenandoahHeap::pacer() const {
  assert (_pacer != NULL, "sanity");
  return _pacer;
}
2330
// Conditionally flips the degenerated-GC-in-progress flag.
void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
  _degenerated_gc_in_progress.set_cond(in_progress);
}
2334
// Conditionally flips the full-GC-in-progress flag.
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress.set_cond(in_progress);
}
2338
// Conditionally flips the full-GC-move flag; only valid within a Full GC.
void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
  assert (is_full_gc_in_progress(), "should be");
  _full_gc_move_in_progress.set_cond(in_progress);
}
2343
// Toggles the UPDATEREFS bit in the global gc-state mask.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  set_gc_state_mask(UPDATEREFS, in_progress);
}
2347
// Forwards nmethod registration to the Shenandoah code-roots table.
void ShenandoahHeap::register_nmethod(nmethod* nm) {
  ShenandoahCodeRoots::add_nmethod(nm);
}
// Parallel task that updates heap references region by region. T is the
// update-refs oop closure type, default-constructed per task. Regions in the
// collection set get their complete bitmap cleared over [bottom, TAMS) instead
// of being scanned; active non-cset regions have their marked objects iterated
// with the closure. Supports pacing and cooperative cancellation.
template<class T>
class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
private:
  T cl;                              // shared update-refs closure instance
  ShenandoahHeap* _heap;
  ShenandoahRegionIterator* _regions; // claim-based region iterator, shared by workers
  bool _concurrent;                  // true when running concurrently with mutators
public:
  ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }

  void work(uint worker_id) {
    ShenandoahWorkerSession worker_session(worker_id);
    // Join the suspendible set only for concurrent execution, so workers can
    // yield at safepoints.
    SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
    ShenandoahHeapRegion* r = _regions->next();
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    while (r != NULL) {
      if (_heap->in_collection_set(r)) {
        // Cset regions are evacuated, not updated: just clear their marks
        // below TAMS in the complete bitmap.
        HeapWord* bottom = r->bottom();
        HeapWord* top = ctx->top_at_mark_start(r->region_number());
        if (top > bottom) {
          ctx->clear_bitmap(bottom, top);
        }
      } else {
        if (r->is_active()) {
          _heap->marked_object_oop_safe_iterate(r, &cl);
        }
      }
      if (ShenandoahPacing) {
        // Report progress up to the safe iteration limit captured at UR start.
        HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
        assert (top_at_start_ur >= r->bottom(), "sanity");
        _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
      }
      // Bail out early if the GC was cancelled (also yields when concurrent).
      if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
        return;
      }
      r = _regions->next();
    }
  }
};
2456
2457 void ShenandoahHeap::update_heap_references(bool concurrent) {
|