< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

Print this page
rev 48920 : [backport] Use PLAB for evacuations instead of TLAB
rev 48921 : [backport] Fix PLAB alignment reserve


  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"

  30 
  31 #include "gc/shenandoah/brooksPointer.hpp"
  32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahControlThread.hpp"
  39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  46 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahPacer.hpp"
  49 #include "gc/shenandoah/shenandoahPacer.inline.hpp"


 356   _complete_top_at_mark_starts_base(NULL),
 357   _mark_bit_map0(),
 358   _mark_bit_map1(),
 359   _aux_bit_map(),
 360   _connection_matrix(NULL),
 361   _verifier(NULL),
 362   _pacer(NULL),
 363   _used_at_last_gc(0),
 364   _alloc_seq_at_last_gc_start(0),
 365   _alloc_seq_at_last_gc_end(0),
 366   _safepoint_workers(NULL),
 367   _gc_cycle_mode(),
 368 #ifdef ASSERT
 369   _heap_expansion_count(0),
 370 #endif
 371   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 372   _phase_timings(NULL),
 373   _alloc_tracker(NULL),
 374   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 375   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),


 376   _memory_pool(NULL)
 377 {
 378   log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
 379   log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
 380   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 381 
 382   _scm = new ShenandoahConcurrentMark();
 383   _full_gc = new ShenandoahMarkCompact();
 384   _used = 0;
 385 
 386   _max_workers = MAX2(_max_workers, 1U);
 387   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 388                             /* are_GC_task_threads */true,
 389                             /* are_ConcurrentGC_threads */false);
 390   if (_workers == NULL) {
 391     vm_exit_during_initialization("Failed necessary allocation.");
 392   } else {
 393     _workers->initialize_workers();
 394   }
 395 


 531     st->print_cr("Matrix:");
 532 
 533     ShenandoahConnectionMatrix* matrix = connection_matrix();
 534     if (matrix != NULL) {
 535       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 536       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 537       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 538     } else {
 539       st->print_cr(" No matrix.");
 540     }
 541   }
 542 
 543   if (Verbose) {
 544     print_heap_regions_on(st);
 545   }
 546 }
 547 
// Thread closure that eagerly initializes a thread's GCLAB (old ThreadLocalAllocBuffer-based scheme).
class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};
 554 
// Finishes heap setup after VM bootstrap: initializes GCLABs for all existing
// threads, then the concurrent marker, full-GC machinery, and reference processing.
void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  if (UseTLAB) {
    // Threads_lock guards the thread lists while we iterate them.
    MutexLocker ml(Threads_lock);

    ShenandoahInitGCLABClosure init_gclabs;
    Threads::java_threads_do(&init_gclabs);
    gc_threads_do(&init_gclabs);

    // gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
    // Now, we will let WorkGang initialize gclab when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _shenandoah_policy->post_heap_initialize();
}
 576 
// Current used bytes; load-acquire pairs with releasing updates from allocation paths.
size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}
 580 
// Currently committed bytes; standalone acquire fence before the plain read of _committed.
size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}
 585 
 586 void ShenandoahHeap::increase_committed(size_t bytes) {
 587   assert_heaplock_or_safepoint();


 660 
 661   size_t count = 0;
 662   for (size_t i = 0; i < num_regions(); i++) {
 663     ShenandoahHeapRegion* r = get_region(i);
 664     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 665       r->make_uncommitted();
 666       count++;
 667     }
 668   }
 669 
 670   if (count > 0) {
 671     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 672                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 673     _control_thread->notify_heap_changed();
 674   }
 675 }
 676 
// GCLAB allocation slow path: either keep the current GCLAB (when its free
// space is still above the refill-waste limit) and return NULL so the caller
// allocates in shared space, or retire it and install a freshly sized GCLAB,
// returning the newly allocated object of 'size' words at its start.
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  // compute_size() returning 0 means no GCLAB of useful size can be had.
  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  // The object occupies the front of the new GCLAB; the rest becomes the buffer.
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}
 717 
// Allocates backing memory for a new mutator TLAB of 'word_size' words.
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_tlab);
}
 724 
// Allocates backing memory for a new GCLAB of 'word_size' words.
HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_gclab);
}
 731 
 732 HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
 733   HeapWord* result = allocate_memory(word_size, type);
 734 
 735   if (result != NULL) {


1064       _free_set->rebuild();
1065     }
1066 
1067     Universe::update_heap_info_at_gc();
1068 
1069     if (ShenandoahVerify) {
1070       verifier()->verify_before_evacuation();
1071     }
1072   }
1073 }
1074 
1075 
// Thread closure that makes each thread's GCLAB parsable, optionally retiring it.
class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;  // whether to retire (true) or just fill (false) the buffer

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().make_parsable(_retire);
  }
};
1088 
// Makes all TLABs and GCLABs heap-parsable (filler objects in unused tail),
// optionally retiring them; no-op when TLABs are disabled.
void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    ShenandoahRetireTLABClosure cl(retire_tlabs);
    Threads::java_threads_do(&cl);
    gc_threads_do(&cl);
  }
}
1097 
1098 
1099 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1100   ShenandoahRootEvacuator* _rp;
1101 public:
1102 
1103   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1104     AbstractGangTask("Shenandoah evacuate and update roots"),
1105     _rp(rp)
1106   {
1107     // Nothing else to do.
1108   }
1109 
1110   void work(uint worker_id) {
1111     ShenandoahEvacOOMScope oom_evac_scope;
1112     ShenandoahEvacuateUpdateRootsClosure cl;
1113 
1114     if (ShenandoahConcurrentEvacCodeRoots) {
1115       _rp->process_evacuate_roots(&cl, NULL, worker_id);


1187 
1188   CodeBlobToOopClosure blobsCl(cl, false);
1189   CLDToOopClosure cldCl(cl);
1190 
1191   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1192   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1193 }
1194 
// Shenandoah always supports TLAB allocation.
bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}
1198 
// Upper bound for a new TLAB: free-set estimate capped by the max TLAB size.
size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
}
1202 
// Max TLAB size is bounded by the heap region size.
size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}
1206 
// Thread closure that lets each thread's GCLAB recompute its desired size.
class ShenandoahResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().resize();
  }
};
1214 
// Resizes mutator TLABs (via superclass) and then all GCLABs.
void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ShenandoahResizeGCLABClosure cl;
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}
1222 
// Thread closure that folds each GCLAB's per-cycle stats into the global
// statistics, then resets the per-thread counters for the next cycle.
class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};
1231 
// Accumulates and resets GCLAB statistics for every Java and GC thread.
void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  ShenandoahAccumulateStatisticsGCLABClosure cl;
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}
1237 
// TLAB stores need no extra barriers in Shenandoah.
bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}
1241 
// No pre-barrier work for freshly allocated objects; returns the object unchanged.
oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}
1246 
// Initializing stores into new objects need no barriers.
bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}
1250 
// Shenandoah does not use a card table, so no card mark is required after stores.
bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}
1254 
1255 void ShenandoahHeap::collect(GCCause::Cause cause) {


1475     return false;
1476   }
1477 };
1478 
1479 void ShenandoahHeap::op_init_mark() {
1480   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1481 
1482   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1483 
1484   if (ShenandoahVerify) {
1485     verifier()->verify_before_concmark();
1486   }
1487 
1488   {
1489     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1490     accumulate_statistics_all_tlabs();
1491   }
1492 
1493   set_concurrent_mark_in_progress(true);
1494   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1495   if (UseTLAB) {
1496     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1497     make_tlabs_parsable(true);
1498   }
1499 
1500   {
1501     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1502     ShenandoahClearLivenessClosure clc(this);
1503     heap_region_iterate(&clc);
1504   }
1505 
1506   // Make above changes visible to worker threads
1507   OrderAccess::fence();
1508 
1509   concurrentMark()->init_mark_roots();
1510 
1511   if (UseTLAB) {
1512     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1513     resize_all_tlabs();
1514   }
1515 


1555     if (ShenandoahPacing) {
1556       pacer()->setup_for_evac();
1557     }
1558   } else {
1559     concurrentMark()->cancel();
1560     stop_concurrent_marking();
1561 
1562     if (process_references()) {
1563       // Abandon reference processing right away: pre-cleaning must have failed.
1564       ReferenceProcessor *rp = ref_processor();
1565       rp->disable_discovery();
1566       rp->abandon_partial_discovery();
1567       rp->verify_no_references_recorded();
1568     }
1569   }
1570 }
1571 
// Final-evacuation safepoint operation: turns off the evacuation phase and
// optionally verifies the heap state afterwards.
void ShenandoahHeap::op_final_evac() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");

  set_evacuation_in_progress(false);
  if (ShenandoahVerify) {
    verifier()->verify_after_evacuation();
  }
}
1580 
1581 void ShenandoahHeap::op_evac() {
1582 
1583   LogTarget(Trace, gc, region) lt_region;
1584   LogTarget(Trace, gc, cset) lt_cset;
1585 
1586   if (lt_region.is_enabled()) {
1587     ResourceMark rm;
1588     LogStream ls(lt_region);
1589     ls.print_cr("All available regions:");
1590     print_heap_regions_on(&ls);
1591   }
1592 
1593   if (lt_cset.is_enabled()) {
1594     ResourceMark rm;


1667   concurrentMark()->preclean_weak_refs();
1668 
1669   // Allocations happen during concurrent preclean, record peak after the phase:
1670   shenandoahPolicy()->record_peak_occupancy();
1671 }
1672 
// Safepoint operation: start a traversal GC collection.
void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}
1676 
// Concurrent phase of the traversal GC.
void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}
1680 
// Safepoint operation: finish the traversal GC collection.
void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}
1684 
// Full GC entry point: delegates the entire STW collection to mark-compact.
void ShenandoahHeap::op_full(GCCause::Cause cause) {
  full_gc()->do_it(cause);
}
1688 
1689 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1690   // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1691   // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1692   // some phase, we have to upgrade the Degenerate GC to Full GC.
1693 
1694   clear_cancelled_concgc();
1695 
1696   size_t used_before = used();
1697 
1698   switch (point) {
1699     case _degenerated_evac:
1700       // Not possible to degenerate from here, upgrade to Full GC right away.
1701       cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1702       op_degenerated_fail();
1703       return;
1704 
1705     // The cases below form the Duff's-like device: it describes the actual GC cycle,
1706     // but enters it at different points, depending on which concurrent phase had


2274   }
2275 };
2276 
// Walks the heap (driven by _update_refs_iterator) updating forwarded
// references; picks the matrix-aware closure when the connection matrix is on.
// 'concurrent' selects concurrent vs. safepoint operation inside the task.
void ShenandoahHeap::update_heap_references(bool concurrent) {
  if (UseShenandoahMatrix) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(&_update_refs_iterator, concurrent);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
    workers()->run_task(&task);
  }
}
2286 
2287 void ShenandoahHeap::op_init_updaterefs() {
2288   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2289 
2290   if (ShenandoahVerify) {
2291     verifier()->verify_before_updaterefs();
2292   }
2293 

2294   set_evacuation_in_progress(false);
2295   set_update_refs_in_progress(true);
2296   make_tlabs_parsable(true);
2297   if (UseShenandoahMatrix) {
2298     connection_matrix()->clear_all();
2299   }
2300   for (uint i = 0; i < num_regions(); i++) {
2301     ShenandoahHeapRegion* r = get_region(i);
2302     r->set_concurrent_iteration_safe_limit(r->top());
2303   }
2304 
2305   // Reset iterator.
2306   _update_refs_iterator = ShenandoahRegionIterator();
2307 
2308   if (ShenandoahPacing) {
2309     pacer()->setup_for_updaterefs();
2310   }
2311 }
2312 
2313 void ShenandoahHeap::op_final_updaterefs() {


2874       break;
2875     }
2876     r = regions.next();
2877   }
2878 }
2879 
// True when the current cycle mode is MINOR.
bool ShenandoahHeap::is_minor_gc() const {
  return _gc_cycle_mode.get() == MINOR;
}
2883 
// True when the current cycle mode is MAJOR.
bool ShenandoahHeap::is_major_gc() const {
  return _gc_cycle_mode.get() == MAJOR;
}
2887 
// Records the GC cycle mode for the current cycle.
void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
  _gc_cycle_mode.set(gc_cycle_mode);
}
2891 
// Returns the raw packed GC-state bits.
char ShenandoahHeap::gc_state() {
  return _gc_state.raw_value();
}


  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"
  30 #include "gc/shared/plab.hpp"
  31 
  32 #include "gc/shenandoah/brooksPointer.hpp"
  33 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  34 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  39 #include "gc/shenandoah/shenandoahControlThread.hpp"
  40 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  41 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  45 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  46 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  47 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  48 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  49 #include "gc/shenandoah/shenandoahPacer.hpp"
  50 #include "gc/shenandoah/shenandoahPacer.inline.hpp"


 357   _complete_top_at_mark_starts_base(NULL),
 358   _mark_bit_map0(),
 359   _mark_bit_map1(),
 360   _aux_bit_map(),
 361   _connection_matrix(NULL),
 362   _verifier(NULL),
 363   _pacer(NULL),
 364   _used_at_last_gc(0),
 365   _alloc_seq_at_last_gc_start(0),
 366   _alloc_seq_at_last_gc_end(0),
 367   _safepoint_workers(NULL),
 368   _gc_cycle_mode(),
 369 #ifdef ASSERT
 370   _heap_expansion_count(0),
 371 #endif
 372   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 373   _phase_timings(NULL),
 374   _alloc_tracker(NULL),
 375   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 376   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 377   _mutator_gclab_stats(new PLABStats("Shenandoah mutator GCLAB stats", OldPLABSize, PLABWeight)),
 378   _collector_gclab_stats(new PLABStats("Shenandoah collector GCLAB stats", YoungPLABSize, PLABWeight)),
 379   _memory_pool(NULL)
 380 {
 381   log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
 382   log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
 383   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 384 
 385   _scm = new ShenandoahConcurrentMark();
 386   _full_gc = new ShenandoahMarkCompact();
 387   _used = 0;
 388 
 389   _max_workers = MAX2(_max_workers, 1U);
 390   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 391                             /* are_GC_task_threads */true,
 392                             /* are_ConcurrentGC_threads */false);
 393   if (_workers == NULL) {
 394     vm_exit_during_initialization("Failed necessary allocation.");
 395   } else {
 396     _workers->initialize_workers();
 397   }
 398 


 534     st->print_cr("Matrix:");
 535 
 536     ShenandoahConnectionMatrix* matrix = connection_matrix();
 537     if (matrix != NULL) {
 538       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 539       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 540       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 541     } else {
 542       st->print_cr(" No matrix.");
 543     }
 544   }
 545 
 546   if (Verbose) {
 547     print_heap_regions_on(st);
 548   }
 549 }
 550 
// Thread closure that initializes a thread's GCLAB via the heap (PLAB-based scheme).
class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    ShenandoahHeap::heap()->initialize_gclab(thread);
  }
};
 557 
// Finishes heap setup after VM bootstrap: initializes GCLABs for all existing
// threads (unconditionally, since GCLABs are PLABs independent of UseTLAB),
// then the concurrent marker, full-GC machinery, and reference processing.
// NOTE: Threads_lock stays held for the remainder of this method.
void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitGCLABClosure init_gclabs;
  Threads::java_threads_do(&init_gclabs);
  gc_threads_do(&init_gclabs);

  // gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
  // Now, we will let WorkGang initialize gclab when a new worker is created.
  _workers->set_initialize_gclab();

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _shenandoah_policy->post_heap_initialize();
}
 577 
// Current used bytes; load-acquire pairs with releasing updates from allocation paths.
size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}
 581 
// Currently committed bytes; standalone acquire fence before the plain read of _committed.
size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}
 586 
 587 void ShenandoahHeap::increase_committed(size_t bytes) {
 588   assert_heaplock_or_safepoint();


 661 
 662   size_t count = 0;
 663   for (size_t i = 0; i < num_regions(); i++) {
 664     ShenandoahHeapRegion* r = get_region(i);
 665     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 666       r->make_uncommitted();
 667       count++;
 668     }
 669   }
 670 
 671   if (count > 0) {
 672     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 673                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 674     _control_thread->notify_heap_changed();
 675   }
 676 }
 677 
// GCLAB (PLAB) allocation slow path: retires the current buffer, installs a
// freshly sized one (sized separately for mutator vs. collector threads from
// their respective PLAB statistics), and allocates 'size' words from it.
// Returns NULL when no new GCLAB can be obtained, so the caller falls back
// to a shared-space allocation.
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  PLAB* gclab = thread->gclab();

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  gclab->retire();
  // Figure out size of new GCLAB
  size_t new_gclab_size;
  if (thread->is_Java_thread()) {
    // Mutator GCLABs are sized from mutator stats, normalized by thread count.
    new_gclab_size = _mutator_gclab_stats->desired_plab_sz(Threads::number_of_threads());
  } else {
    // Collector GCLABs are sized from collector stats, normalized by active workers.
    new_gclab_size = _collector_gclab_stats->desired_plab_sz(workers()->active_workers());
  }

  // Allocate a new GCLAB...
  HeapWord* gclab_buf = allocate_new_gclab(new_gclab_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(gclab_buf, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, new_gclab_size);
  return gclab->allocate(size);
}
 716 
// Allocates backing memory for a new mutator TLAB of 'word_size' words.
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_tlab);
}
 723 
// Allocates backing memory for a new GCLAB of 'word_size' words.
HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_gclab);
}
 730 
 731 HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
 732   HeapWord* result = allocate_memory(word_size, type);
 733 
 734   if (result != NULL) {


1063       _free_set->rebuild();
1064     }
1065 
1066     Universe::update_heap_info_at_gc();
1067 
1068     if (ShenandoahVerify) {
1069       verifier()->verify_before_evacuation();
1070     }
1071   }
1072 }
1073 
1074 
// Thread closure that retires each thread's GCLAB (PLAB), if it has one.
// NOTE(review): _retire is no longer consulted here — PLAB retirement is
// unconditional; the flag only drives CollectedHeap::ensure_parsability.
class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    PLAB* gclab = thread->gclab();
    if (gclab != NULL) {
      gclab->retire();
    }
  }
};
1089 
// Makes TLABs parsable (when enabled), then retires all GCLABs regardless of
// UseTLAB, since GCLABs are PLABs managed independently of mutator TLABs.
void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
  }
  ShenandoahRetireTLABClosure cl(retire_tlabs);
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}
1098 
1099 
1100 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1101   ShenandoahRootEvacuator* _rp;
1102 public:
1103 
1104   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1105     AbstractGangTask("Shenandoah evacuate and update roots"),
1106     _rp(rp)
1107   {
1108     // Nothing else to do.
1109   }
1110 
1111   void work(uint worker_id) {
1112     ShenandoahEvacOOMScope oom_evac_scope;
1113     ShenandoahEvacuateUpdateRootsClosure cl;
1114 
1115     if (ShenandoahConcurrentEvacCodeRoots) {
1116       _rp->process_evacuate_roots(&cl, NULL, worker_id);


1188 
1189   CodeBlobToOopClosure blobsCl(cl, false);
1190   CLDToOopClosure cldCl(cl);
1191 
1192   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1193   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1194 }
1195 
// Shenandoah always supports TLAB allocation.
bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}
1199 
// Upper bound for a new TLAB: free-set estimate capped by the max TLAB size.
size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
}
1203 
// Max TLAB size is bounded by the heap region size.
size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}
1207 
















// Thread closure that retires each GCLAB and folds its stats into the matching
// global PLABStats: mutator stats for Java threads, collector stats otherwise.
class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    PLAB* gclab = thread->gclab();
    if (gclab != NULL) {
      if (thread->is_Java_thread()) {
        gclab->flush_and_retire_stats(heap->mutator_gclab_stats());
      } else {
        gclab->flush_and_retire_stats(heap->collector_gclab_stats());
      }
    }
  }
};
1222 
// Flushes GCLAB stats from every thread, then recomputes desired PLAB sizes
// for both the mutator and collector populations for the next cycle.
void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  ShenandoahAccumulateStatisticsGCLABClosure cl;
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
  _mutator_gclab_stats->adjust_desired_plab_sz();
  _collector_gclab_stats->adjust_desired_plab_sz();
}
1230 
// TLAB stores need no extra barriers in Shenandoah.
bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}
1234 
// No pre-barrier work for freshly allocated objects; returns the object unchanged.
oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}
1239 
// Initializing stores into new objects need no barriers.
bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}
1243 
// Shenandoah does not use a card table, so no card mark is required after stores.
bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}
1247 
1248 void ShenandoahHeap::collect(GCCause::Cause cause) {


1468     return false;
1469   }
1470 };
1471 
1472 void ShenandoahHeap::op_init_mark() {
1473   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1474 
1475   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1476 
1477   if (ShenandoahVerify) {
1478     verifier()->verify_before_concmark();
1479   }
1480 
1481   {
1482     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1483     accumulate_statistics_all_tlabs();
1484   }
1485 
1486   set_concurrent_mark_in_progress(true);
1487   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1488   {
1489     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1490     make_tlabs_parsable(true);
1491   }
1492 
1493   {
1494     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1495     ShenandoahClearLivenessClosure clc(this);
1496     heap_region_iterate(&clc);
1497   }
1498 
1499   // Make above changes visible to worker threads
1500   OrderAccess::fence();
1501 
1502   concurrentMark()->init_mark_roots();
1503 
1504   if (UseTLAB) {
1505     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1506     resize_all_tlabs();
1507   }
1508 


1548     if (ShenandoahPacing) {
1549       pacer()->setup_for_evac();
1550     }
1551   } else {
1552     concurrentMark()->cancel();
1553     stop_concurrent_marking();
1554 
1555     if (process_references()) {
1556       // Abandon reference processing right away: pre-cleaning must have failed.
1557       ReferenceProcessor *rp = ref_processor();
1558       rp->disable_discovery();
1559       rp->abandon_partial_discovery();
1560       rp->verify_no_references_recorded();
1561     }
1562   }
1563 }
1564 
// Final-evacuation safepoint operation: flushes GCLAB statistics (so PLAB
// sizing sees this cycle's data), ends the evacuation phase, then verifies.
void ShenandoahHeap::op_final_evac() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");

  accumulate_statistics_all_gclabs();
  set_evacuation_in_progress(false);
  if (ShenandoahVerify) {
    verifier()->verify_after_evacuation();
  }
}
1574 
1575 void ShenandoahHeap::op_evac() {
1576 
1577   LogTarget(Trace, gc, region) lt_region;
1578   LogTarget(Trace, gc, cset) lt_cset;
1579 
1580   if (lt_region.is_enabled()) {
1581     ResourceMark rm;
1582     LogStream ls(lt_region);
1583     ls.print_cr("All available regions:");
1584     print_heap_regions_on(&ls);
1585   }
1586 
1587   if (lt_cset.is_enabled()) {
1588     ResourceMark rm;


1661   concurrentMark()->preclean_weak_refs();
1662 
1663   // Allocations happen during concurrent preclean, record peak after the phase:
1664   shenandoahPolicy()->record_peak_occupancy();
1665 }
1666 
// Delegates the init-traversal pause to the traversal GC driver.
void ShenandoahHeap::op_init_traversal() {
  traversal_gc()->init_traversal_collection();
}
1670 
// Delegates the concurrent traversal phase to the traversal GC driver.
void ShenandoahHeap::op_traversal() {
  traversal_gc()->concurrent_traversal_collection();
}
1674 
// Delegates the final-traversal pause to the traversal GC driver.
void ShenandoahHeap::op_final_traversal() {
  traversal_gc()->final_traversal_collection();
}
1678 
// Runs a stop-the-world Full GC for the given cause, then resizes TLABs:
// a full collection changes the free-space picture drastically, so the
// previously computed TLAB sizes are stale.
void ShenandoahHeap::op_full(GCCause::Cause cause) {
  full_gc()->do_it(cause);
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
    resize_all_tlabs();
  }
}
1686 
1687 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1688   // Degenerated GC is STW, but it can also fail. Current mechanics communicates
1689   // GC failure via cancelled_concgc() flag. So, if we detect the failure after
1690   // some phase, we have to upgrade the Degenerate GC to Full GC.
1691 
1692   clear_cancelled_concgc();
1693 
1694   size_t used_before = used();
1695 
1696   switch (point) {
1697     case _degenerated_evac:
1698       // Not possible to degenerate from here, upgrade to Full GC right away.
1699       cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1700       op_degenerated_fail();
1701       return;
1702 
1703     // The cases below form the Duff's-like device: it describes the actual GC cycle,
1704     // but enters it at different points, depending on which concurrent phase had


2272   }
2273 };
2274 
// Updates all heap references to point to to-space copies, walking regions
// via the shared _update_refs_iterator. The two branches are intentionally
// parallel: they instantiate the worker task with different closure template
// arguments (matrix-updating vs. plain), so the duplication cannot be folded
// without erasing the template parameter.
void ShenandoahHeap::update_heap_references(bool concurrent) {
  if (UseShenandoahMatrix) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(&_update_refs_iterator, concurrent);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
    workers()->run_task(&task);
  }
}
2284 
// STW pause that prepares the heap for the concurrent update-references phase.
// Sequence is order-sensitive: verify first (while the pre-update state is
// still intact), retire GCLAB stats, flip the phase flags, then make TLABs
// parsable so the upcoming heap walk can safely parse every region.
void ShenandoahHeap::op_init_updaterefs() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");

  if (ShenandoahVerify) {
    verifier()->verify_before_updaterefs();
  }

  accumulate_statistics_all_gclabs();
  set_evacuation_in_progress(false);
  set_update_refs_in_progress(true);
  make_tlabs_parsable(true);
  if (UseShenandoahMatrix) {
    // Connection matrix is rebuilt from scratch during update-refs.
    connection_matrix()->clear_all();
  }
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    // Snapshot top() so concurrent workers only scan objects that exist now.
    r->set_concurrent_iteration_safe_limit(r->top());
  }

  // Reset iterator.
  _update_refs_iterator = ShenandoahRegionIterator();

  if (ShenandoahPacing) {
    pacer()->setup_for_updaterefs();
  }
}
2311 
2312 void ShenandoahHeap::op_final_updaterefs() {


2873       break;
2874     }
2875     r = regions.next();
2876   }
2877 }
2878 
// Returns true when the current cycle mode is MINOR.
bool ShenandoahHeap::is_minor_gc() const {
  return _gc_cycle_mode.get() == MINOR;
}
2882 
// Returns true when the current cycle mode is MAJOR.
bool ShenandoahHeap::is_major_gc() const {
  return _gc_cycle_mode.get() == MAJOR;
}
2886 
// Records the mode (e.g. MINOR/MAJOR) of the GC cycle about to run.
void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
  _gc_cycle_mode.set(gc_cycle_mode);
}
2890 
// Exposes the packed GC state flags as a raw byte (used by barriers/JIT).
char ShenandoahHeap::gc_state() {
  return _gc_state.raw_value();
}
2894 
2895 void ShenandoahHeap::initialize_gclab(Thread* thread) {
2896   if (thread->is_Java_thread()) {
2897     thread->set_gclab(new PLAB(OldPLABSize));
2898   } else {
2899     thread->set_gclab(new PLAB(YoungPLABSize));
2900   }
2901 }
2902 
2903 void ShenandoahHeap::finalize_mutator_gclab(Thread* thread) {
2904   thread->gclab()->flush_and_retire_stats(ShenandoahHeap::heap()->mutator_gclab_stats());
2905   delete thread->gclab();
2906   thread->set_gclab(NULL);
2907 }
< prev index next >