< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp

Print this page
rev 50076 : Fold Partial GC into Traversal GC


  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shared/taskqueue.inline.hpp"
  30 #include "gc/shared/weakProcessor.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"

  32 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  36 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  37 #include "gc/shenandoah/shenandoahHeap.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"

  39 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  41 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  42 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  43 #include "gc/shenandoah/shenandoahStrDedupQueue.inline.hpp"
  44 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  45 #include "gc/shenandoah/shenandoahUtils.hpp"
  46 #include "gc/shenandoah/shenandoahVerifier.hpp"
  47 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  48 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  49 
  50 #include "memory/iterator.hpp"
  51 
  52 /**
  53  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  54  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  55  * is incremental-update-based.
  56  *
  57  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  58  * several reasons:


  79  * Gotchas:
  80  * - While we want new objects to be implicitly marked, we don't want to count
  81  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  82  *   them for cset. This means that we need to protect such regions from
  83  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  84  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  85  *   code.
  86  * - We *need* to traverse through evacuated objects. Those objects are
  87  *   pre-existing, and any references in them point to interesting objects that
  88  *   we need to see. We also want to count them as live, because we just
  89  *   determined that they are alive :-) I achieve this by upping TAMS
  90  *   concurrently for every gclab/gc-shared alloc before publishing the
  91  *   evacuated object. This way, the GC threads will not consider such objects
  92  *   implicitly marked, and traverse through them as normal.
  93  */
// Closure applied to SATB buffers during traversal GC. Note the file-header
// comment: the SATB machinery is reused here as a generic oop buffer for
// incremental-update marking, not as a snapshot-at-the-beginning algorithm.
  94 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  95 private:
  96   ShenandoahObjToScanQueue* _queue;
  97   ShenandoahTraversalGC* _traversal_gc;
  98   ShenandoahHeap* _heap;


  99 public:
 100   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 101     _queue(q), _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 102     _heap(ShenandoahHeap::heap())

 103  { }
 104 
// Drain one buffer: for each recorded oop that is not yet marked in the
// "next" bitmap, mark it (mark_next() arbitrates races between workers)
// and push it on this worker's scan queue.
 105   void do_buffer(void** buffer, size_t size) {
 106     for (size_t i = 0; i < size; ++i) {
 107       oop* p = (oop*) &buffer[i];
 108       oop obj = RawAccess<>::oop_load(p);
// Buffer entries must hold to-space (non-forwarded) oops at this point.
 109       shenandoah_assert_not_forwarded(p, obj);
 110       if (!_heap->is_marked_next(obj) && _heap->mark_next(obj)) {
 111         _queue->push(ShenandoahMarkTask(obj));
 112       }
 113     }
 114   }
 115 };
 116 
 117 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 118   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 119 
 120  public:
 121   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 122     _satb_cl(satb_cl) {}
 123 
 124   void do_thread(Thread* thread) {
 125     if (thread->is_Java_thread()) {
 126       JavaThread* jt = (JavaThread*)thread;
 127       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 128     } else if (thread->is_VM_thread()) {
 129       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 130     }


 282       traversal_gc->main_loop(worker_id, _terminator, false);
 283     }
 284 
 285   }
 286 };
 287 
// Flush this worker's thread-local liveness counters into the shared
// per-region live-data accumulators, then zero the local slots so the
// buffer can be reused. Counters are in words (see increase_live_data_gc_words).
 288 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 289   jushort* ld = get_liveness(worker_id);
 290   for (uint i = 0; i < _heap->num_regions(); i++) {
 291     ShenandoahHeapRegion* r = _heap->get_region(i);
 292     jushort live = ld[i];
 293     if (live > 0) {
 294       r->increase_live_data_gc_words(live);
 295       ld[i] = 0;
 296     }
 297   }
 298 }
 299 
// Constructor: creates one object-scan queue per potential worker and a
// per-worker array of jushort liveness counters (one slot per heap region),
// later flushed by flush_liveness().
 300 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 301   _heap(heap),
 302   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())) {




 303 
 304   uint num_queues = heap->max_workers();
 305   for (uint i = 0; i < num_queues; ++i) {
 306     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 307     task_queue->initialize();
 308     _task_queues->register_queue(i, task_queue);
 309   }
 310 
 311   uint workers = heap->max_workers();
 312   _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
 313   for (uint worker = 0; worker < workers; worker++) {
 314      _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
 315   }
 316 
 317 }
 318 
// Destructor is intentionally empty: the traversal GC lives as long as the
// heap, so queues and liveness arrays are never reclaimed here.
 319 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 320 }
 321 





























// Prepare for a traversal cycle (called at the init safepoint under the heap
// lock): reset the collection set, make TLABs parsable, choose the new
// collection set, and rebuild the free set.
 322 void ShenandoahTraversalGC::prepare() {
 323   _heap->collection_set()->clear();
 324   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 325 
// Retire TLABs so the heap is walkable for cset selection.
 326   _heap->make_tlabs_parsable(true);
 327 
 328   assert(_heap->is_next_bitmap_clear(), "need clean mark bitmap");
 329 
 330   ShenandoahFreeSet* free_set = _heap->free_set();
 331   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 332 
 333   // Find collection set
 334   _heap->shenandoahPolicy()->choose_collection_set(collection_set, false);

 335 
 336   // Rebuild free set
 337   free_set->rebuild();
 338 
 339   log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions", collection_set->count());
 340 }
 341 
 342 void ShenandoahTraversalGC::init_traversal_collection() {
 343   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");
 344 
 345   if (ShenandoahVerify) {
 346     _heap->verifier()->verify_before_traversal();
 347   }
 348 
 349   {
 350     ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
 351     ShenandoahHeapLocker lock(_heap->lock());
 352     prepare();
 353   }
 354 
 355   _heap->set_concurrent_traversal_in_progress(true);
 356 
 357   bool process_refs = _heap->process_references();
 358   if (process_refs) {
 359     ReferenceProcessor* rp = _heap->ref_processor();


 376 
 377       if (UseShenandoahOWST) {
 378         ShenandoahTaskTerminator terminator(nworkers, task_queues());
 379         ShenandoahInitTraversalCollectionTask traversal_task(&rp);
 380         _heap->workers()->run_task(&traversal_task);
 381       } else {
 382         ParallelTaskTerminator terminator(nworkers, task_queues());
 383         ShenandoahInitTraversalCollectionTask traversal_task(&rp);
 384         _heap->workers()->run_task(&traversal_task);
 385       }
 386     }
 387 
 388 #if defined(COMPILER2) || INCLUDE_JVMCI
 389     DerivedPointerTable::update_pointers();
 390 #endif
 391   }
 392 
 393   if (ShenandoahPacing) {
 394     _heap->pacer()->setup_for_traversal();
 395   }


 396 }
 397 
// Entry point for the traversal marking loop. Lifts the runtime do_satb flag
// into the DO_SATB template parameter so SATB draining is compiled out of the
// non-SATB path.
 398 void ShenandoahTraversalGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator, bool do_satb) {
 399   if (do_satb) {
 400     main_loop_prework<true>(worker_id, terminator);
 401   } else {
 402     main_loop_prework<false>(worker_id, terminator);
 403   }
 404 }
 405 
 406 template <bool DO_SATB>
 407 void ShenandoahTraversalGC::main_loop_prework(uint w, ParallelTaskTerminator* t) {
 408   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
 409 
 410   // Initialize live data.
 411   jushort* ld = get_liveness(w);
 412   Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
 413 
 414   ReferenceProcessor* rp = NULL;
 415   if (_heap->process_references()) {
 416     rp = _heap->ref_processor();
 417   }











































 418   if (!_heap->is_degenerated_gc_in_progress()) {
 419     if (_heap->unload_classes()) {
 420       if (ShenandoahStringDedup::is_enabled()) {
 421         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 422         ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq);
 423         main_loop_work<ShenandoahTraversalMetadataDedupClosure, DO_SATB>(&cl, ld, w, t);
 424       } else {
 425         ShenandoahTraversalMetadataClosure cl(q, rp);
 426         main_loop_work<ShenandoahTraversalMetadataClosure, DO_SATB>(&cl, ld, w, t);
 427       }
 428     } else {
 429       if (ShenandoahStringDedup::is_enabled()) {
 430         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 431         ShenandoahTraversalDedupClosure cl(q, rp, dq);
 432         main_loop_work<ShenandoahTraversalDedupClosure, DO_SATB>(&cl, ld, w, t);
 433       } else {
 434         ShenandoahTraversalClosure cl(q, rp);
 435         main_loop_work<ShenandoahTraversalClosure, DO_SATB>(&cl, ld, w, t);
 436       }
 437     }


 439     if (_heap->unload_classes()) {
 440       if (ShenandoahStringDedup::is_enabled()) {
 441         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 442         ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp, dq);
 443         main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
 444       } else {
 445         ShenandoahTraversalMetadataDegenClosure cl(q, rp);
 446         main_loop_work<ShenandoahTraversalMetadataDegenClosure, DO_SATB>(&cl, ld, w, t);
 447       }
 448     } else {
 449       if (ShenandoahStringDedup::is_enabled()) {
 450         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 451         ShenandoahTraversalDedupDegenClosure cl(q, rp, dq);
 452         main_loop_work<ShenandoahTraversalDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
 453       } else {
 454         ShenandoahTraversalDegenClosure cl(q, rp);
 455         main_loop_work<ShenandoahTraversalDegenClosure, DO_SATB>(&cl, ld, w, t);
 456       }
 457     }
 458   }

 459   flush_liveness(w);
 460 
 461 }
 462 
// Core work loop for one worker. T is the traversal closure flavor selected
// by main_loop_prework; DO_SATB controls whether completed SATB buffers are
// drained as an additional task source. Two phases: first drain any
// outstanding queues claimed from the set, then the steady-state
// pop/SATB/steal loop until termination is offered.
 463 template <class T, bool DO_SATB>
 464 void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
 465   ShenandoahObjToScanQueueSet* queues = task_queues();
 466   ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 467   ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();
 468 
// Number of tasks processed between cancellation/termination checks.
 469   uintx stride = ShenandoahMarkLoopStride;
 470 
 471   ShenandoahMarkTask task;
 472 
 473   // Process outstanding queues, if any.
 474   q = queues->claim_next();
 475   while (q != NULL) {
 476     if (_heap->check_cancelled_concgc_and_yield()) {
// Cancelled: leave the evac-OOM scope and spin until all workers terminate.
 477       ShenandoahCancelledTerminatorTerminator tt;
 478       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 479       while (!terminator->offer_termination(&tt));
 480       return;
 481     }
 482 
 483     for (uint i = 0; i < stride; i++) {
 484       if (q->pop_buffer(task) ||
 485           q->pop_local(task) ||
 486           q->pop_overflow(task)) {
 487         conc_mark->do_task<T, true>(q, cl, live_data, &task);
 488       } else {
 489         assert(q->is_empty(), "Must be empty");
 490         q = queues->claim_next();
 491         break;
 492       }
 493     }
 494   }

















 495   // Normal loop.
 496   q = queues->queue(worker_id);
 497   ShenandoahTraversalSATBBufferClosure satb_cl(q);
 498   SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
 499 
// Seed for the randomized work-stealing victim selection.
 500   int seed = 17;
 501 
 502   while (true) {
 503     if (check_and_handle_cancelled_gc(terminator)) return;
 504 
 505     for (uint i = 0; i < stride; i++) {
// Task sources in priority order: own buffers/local/overflow, then (when
// DO_SATB) a completed SATB buffer drained into our queue, then stealing.
 506       if ((q->pop_buffer(task) ||
 507            q->pop_local(task) ||
 508            q->pop_overflow(task) ||
 509            (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) ||
 510            queues->steal(worker_id, &seed, task))) {
 511         conc_mark->do_task<T, true>(q, cl, live_data, &task);
 512       } else {
 513         ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 514         if (terminator->offer_termination()) return;
 515       }
 516     }
 517   }
 518 }
 519 
// If GC was cancelled, leave the evac-OOM scope and park this worker in the
// terminator until all workers have terminated; returns true so the caller
// can bail out of its loop. Returns false when GC is not cancelled.
 520 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 521   if (_heap->cancelled_concgc()) {
 522     ShenandoahCancelledTerminatorTerminator tt;
 523     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 524     while (! terminator->offer_termination(&tt));
 525     return true;
 526   }
 527   return false;
 528 }
 529 
 530 void ShenandoahTraversalGC::concurrent_traversal_collection() {


 581 
 582   if (!_heap->cancelled_concgc() && _heap->process_references()) {
 583     weak_refs_work();
 584   }
 585 
 586   if (!_heap->cancelled_concgc() && _heap->unload_classes()) {
 587     _heap->unload_classes_and_cleanup_tables(false);
 588     fixup_roots();
 589   }
 590 
 591   if (!_heap->cancelled_concgc()) {
 592     // Still good? We can now trash the cset, and make final verification
 593     {
 594       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
 595       ShenandoahHeapLocker lock(_heap->lock());
 596 
 597       // Trash everything
 598       // Clear immediate garbage regions.
 599       size_t num_regions = _heap->num_regions();
 600 

 601       ShenandoahFreeSet* free_regions = _heap->free_set();
 602       free_regions->clear();
 603       for (size_t i = 0; i < num_regions; i++) {
 604         ShenandoahHeapRegion* r = _heap->get_region(i);
 605         bool not_allocated = _heap->next_top_at_mark_start(r->bottom()) == r->top();
 606         if (r->is_humongous_start() && !r->has_live() && not_allocated) {


 607           // Trash humongous.
 608           HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
 609           assert(!_heap->is_marked_next(oop(humongous_obj)), "must not be marked");
 610           r->make_trash();
 611           while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
 612             i++;
 613             r = _heap->get_region(i);
 614             assert(r->is_humongous_continuation(), "must be humongous continuation");
 615             r->make_trash();
 616           }
 617         } else if (!r->is_empty() && !r->has_live() && not_allocated) {
 618           // Trash regular.
 619           assert(!r->is_humongous(), "handled above");
 620           assert(!r->is_trash(), "must not already be trashed");
 621           r->make_trash();
 622         }
 623       }
 624       _heap->collection_set()->clear();
 625       _heap->free_set()->rebuild();
 626       reset();
 627     }
 628 
 629     if (ShenandoahVerify) {
 630       _heap->verifier()->verify_after_traversal();
 631     }
 632 
 633     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
 634     _heap->set_concurrent_traversal_in_progress(false);
 635     assert(!_heap->cancelled_concgc(), "must not be cancelled when getting out here");
 636   }
 637 }


 709 
// complete_gc closure handed to the reference processor during precleaning:
// drains the traversal queues single-threaded (one-worker terminator,
// worker id 0, no SATB draining).
 710 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 711 public:
 712   void do_void() {
 713     ShenandoahHeap* sh = ShenandoahHeap::heap();
 714     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 715     assert(sh->process_references(), "why else would we be here?");
 716     ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
 717     shenandoah_assert_rp_isalive_installed();
 718     traversal_gc->main_loop((uint) 0, &terminator, false);
 719   }
 720 };
 721 
// keep_alive closure for reference processing on the normal (non-degenerated)
// path: routes each oop through ShenandoahTraversalGC::process_oop with
// string-dedup and degen both disabled.
 722 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 723 private:
 724   ShenandoahObjToScanQueue* _queue;
 725   Thread* _thread;
 726   ShenandoahTraversalGC* _traversal_gc;
 727   template <class T>
 728   inline void do_oop_nv(T* p) {
 729     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue);
 730   }
 731 
 732 public:
 733   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 734     _queue(q), _thread(Thread::current()),
 735     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 736 
 737   void do_oop(narrowOop* p) { do_oop_nv(p); }
 738   void do_oop(oop* p)       { do_oop_nv(p); }
 739 };
 740 
// Degenerated-GC variant of the keep_alive closure above: identical except
// process_oop is instantiated with degen = true.
 741 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 742 private:
 743   ShenandoahObjToScanQueue* _queue;
 744   Thread* _thread;
 745   ShenandoahTraversalGC* _traversal_gc;
 746   template <class T>
 747   inline void do_oop_nv(T* p) {
 748     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue);
 749   }
 750 
 751 public:
 752   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 753     _queue(q), _thread(Thread::current()),
 754     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 755 
 756   void do_oop(narrowOop* p) { do_oop_nv(p); }
 757   void do_oop(oop* p)       { do_oop_nv(p); }
 758 };
 759 








































// Concurrently preclean discovered weak references (single-threaded,
// queue 0) before the final STW pause; must run in the concurrent,
// non-degenerated phase. See the in-body comment for the rationale.
 760 void ShenandoahTraversalGC::preclean_weak_refs() {
 761   // Pre-cleaning weak references before diving into STW makes sense at the
 762   // end of concurrent mark. This will filter out the references which referents
 763   // are alive. Note that ReferenceProcessor already filters out these on reference
 764   // discovery, and the bulk of work is done here. This phase processes leftovers
 765   // that missed the initial filtering, i.e. when referent was marked alive after
 766   // reference was discovered by RP.
 767 
 768   assert(_heap->process_references(), "sanity");
 769 
 770   ShenandoahHeap* sh = ShenandoahHeap::heap();
 771   ReferenceProcessor* rp = sh->ref_processor();
 772 
 773   // Shortcut if no references were discovered to avoid winding up threads.
 774   if (!rp->has_discovered_references()) {
 775     return;
 776   }
 777 
// Precleaning is single-threaded; scoped mutator restores MT discovery after.
 778   ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
 779 
 780   shenandoah_assert_rp_isalive_not_installed();
 781   ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());
 782 
 783   // Interrupt on cancelled GC
 784   ShenandoahTraversalCancelledGCYieldClosure yield;
 785 
 786   assert(task_queues()->is_empty(), "Should be empty");
 787   assert(!sh->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
 788 
 789   ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 790   ShenandoahForwardedIsAliveClosure is_alive;







 791   ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
 792   ResourceMark rm;
 793   rp->preclean_discovered_references(&is_alive, &keep_alive,
 794                                      &complete_gc, &yield,
 795                                      NULL);

// Queues may legitimately be non-empty only if the cycle was cancelled mid-drain.
 796   assert(!sh->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
 797 }
 798 
 799 // Weak Reference Closures
 800 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 801   uint _worker_id;
 802   ParallelTaskTerminator* _terminator;
 803   bool _reset_terminator;
 804 
 805 public:
 806   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 807     _worker_id(worker_id),
 808     _terminator(t),
 809     _reset_terminator(reset_terminator) {
 810   }
 811 
 812   void do_void() {
 813     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 814 
 815     ShenandoahHeap* sh = ShenandoahHeap::heap();


 850 
 851 private:
 852   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 853   ParallelTaskTerminator* _terminator;
 854 public:
 855 
 856   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 857                              ParallelTaskTerminator* t) :
 858     AbstractGangTask("Process reference objects in parallel"),
 859     _proc_task(proc_task),
 860     _terminator(t) {
 861   }
 862 
 863   void work(uint worker_id) {
 864     ShenandoahEvacOOMScope oom_evac_scope;
 865     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 866     ShenandoahHeap* heap = ShenandoahHeap::heap();
 867     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
 868 
 869     ShenandoahForwardedIsAliveClosure is_alive;









 870     if (!heap->is_degenerated_gc_in_progress()) {
 871       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
 872       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
 873     } else {
 874       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
 875       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
 876     }
 877   }

 878 };
 879 
// Gang task adapter that runs the reference processor's EnqueueTask on each
// worker, so discovered references can be enqueued in parallel.
 880 class ShenandoahTraversalRefEnqueueTaskProxy : public AbstractGangTask {
 881 
 882 private:
 883   AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
 884 
 885 public:
 886 
 887   ShenandoahTraversalRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
 888     AbstractGangTask("Enqueue reference objects in parallel"),
 889     _enqueue_task(enqueue_task) {
 890   }
 891 
 892   void work(uint worker_id) {
 893     _enqueue_task.work(worker_id);
 894   }
 895 };
 896 
 897 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {


 958   rp->set_active_mt_degree(nworkers);
 959 
 960   assert(task_queues()->is_empty(), "Should be empty");
 961 
 962   // complete_gc and keep_alive closures instantiated here are only needed for
 963   // single-threaded path in RP. They share the queue 0 for tracking work, which
 964   // simplifies implementation. Since RP may decide to call complete_gc several
 965   // times, we need to be able to reuse the terminator.
 966   uint serial_worker_id = 0;
 967   ParallelTaskTerminator terminator(1, task_queues());
 968   ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 969 
 970   ShenandoahTraversalRefProcTaskExecutor executor(workers);
 971 
 972   ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_q());
 973 
 974   {
 975     ShenandoahGCPhase phase(phase_process);
 976 
 977     ShenandoahForwardedIsAliveClosure is_alive;


















 978     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
 979     rp->process_discovered_references(&is_alive, &keep_alive,
 980                                       &complete_gc, &executor,
 981                                       &pt);
 982     pt.print_all_references();
 983 
 984     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);









 985 
 986     assert(!_heap->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
 987   }
 988 
 989   if (_heap->cancelled_concgc()) return;
 990 
 991   {
 992     ShenandoahGCPhase phase(phase_enqueue);
 993     rp->enqueue_discovered_references(&executor, &pt);
 994     pt.print_enqueue_phase();
 995   }
 996 }


  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shared/taskqueue.inline.hpp"
  30 #include "gc/shared/weakProcessor.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  32 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  37 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.hpp"
  39 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  44 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  45 #include "gc/shenandoah/shenandoahStrDedupQueue.inline.hpp"
  46 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  47 #include "gc/shenandoah/shenandoahUtils.hpp"
  48 #include "gc/shenandoah/shenandoahVerifier.hpp"
  49 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  50 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  51 
  52 #include "memory/iterator.hpp"
  53 
  54 /**
  55  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  56  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  57  * is incremental-update-based.
  58  *
  59  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  60  * several reasons:


  81  * Gotchas:
  82  * - While we want new objects to be implicitly marked, we don't want to count
  83  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  84  *   them for cset. This means that we need to protect such regions from
  85  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  86  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  87  *   code.
  88  * - We *need* to traverse through evacuated objects. Those objects are
  89  *   pre-existing, and any references in them point to interesting objects that
  90  *   we need to see. We also want to count them as live, because we just
  91  *   determined that they are alive :-) I achieve this by upping TAMS
  92  *   concurrently for every gclab/gc-shared alloc before publishing the
  93  *   evacuated object. This way, the GC threads will not consider such objects
  94  *   implicitly marked, and traverse through them as normal.
  95  */
// New-revision SATB buffer closure: unlike the old revision, it only marks
// and enqueues oops that lie inside the traversal set (objects outside the
// traversal set are treated as implicitly live, see prepare_regions()).
  96 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  97 private:
  98   ShenandoahObjToScanQueue* _queue;
  99   ShenandoahTraversalGC* _traversal_gc;
 100   ShenandoahHeap* _heap;
 101   ShenandoahHeapRegionSet* _traversal_set;
 102 
 103 public:
 104   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 105     _queue(q), _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 106     _heap(ShenandoahHeap::heap()),
 107     _traversal_set(ShenandoahHeap::heap()->traversal_gc()->traversal_set())
 108  { }
 109 
// Drain one buffer: mark-and-push each unmarked oop that is in the
// traversal set; mark_next() arbitrates races between workers.
 110   void do_buffer(void** buffer, size_t size) {
 111     for (size_t i = 0; i < size; ++i) {
 112       oop* p = (oop*) &buffer[i];
 113       oop obj = RawAccess<>::oop_load(p);
 114       shenandoah_assert_not_forwarded(p, obj);
 115       if (_traversal_set->is_in((HeapWord*) obj) && !_heap->is_marked_next(obj) && _heap->mark_next(obj)) {
 116         _queue->push(ShenandoahMarkTask(obj));
 117       }
 118     }
 119   }
 120 };
 121 
 122 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 123   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 124 
 125  public:
 126   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 127     _satb_cl(satb_cl) {}
 128 
 129   void do_thread(Thread* thread) {
 130     if (thread->is_Java_thread()) {
 131       JavaThread* jt = (JavaThread*)thread;
 132       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 133     } else if (thread->is_VM_thread()) {
 134       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 135     }


 287       traversal_gc->main_loop(worker_id, _terminator, false);
 288     }
 289 
 290   }
 291 };
 292 
// Flush this worker's thread-local liveness counters into the shared
// per-region live-data accumulators, then zero the local slots for reuse.
 293 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 294   jushort* ld = get_liveness(worker_id);
 295   for (uint i = 0; i < _heap->num_regions(); i++) {
 296     ShenandoahHeapRegion* r = _heap->get_region(i);
 297     jushort live = ld[i];
 298     if (live > 0) {
 299       r->increase_live_data_gc_words(live);
 300       ld[i] = 0;
 301     }
 302   }
 303 }
 304 
// New-revision constructor: in addition to the queues and per-worker
// liveness arrays, it creates the traversal/root region sets, caches a root
// region iterator, and caches the heap's connection matrix (all new with the
// fold of Partial GC into Traversal GC).
 305 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 306   _heap(heap),
 307   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 308   _traversal_set(new ShenandoahHeapRegionSet()),
 309   _root_regions(new ShenandoahHeapRegionSet()),
 310   _root_regions_iterator(_root_regions->iterator()),
 311   _matrix(heap->connection_matrix()) {
 312 
 313   uint num_queues = heap->max_workers();
 314   for (uint i = 0; i < num_queues; ++i) {
 315     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 316     task_queue->initialize();
 317     _task_queues->register_queue(i, task_queue);
 318   }
 319 
 320   uint workers = heap->max_workers();
 321   _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
 322   for (uint worker = 0; worker < workers; worker++) {
 323      _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
 324   }
 325 
 326 }
 327 
// Destructor is intentionally empty: the traversal GC lives as long as the
// heap, so its queues, region sets, and liveness arrays are not reclaimed.
 328 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 329 }
 330 
// Set up per-region marking state for the traversal cycle: regions in the
// traversal set get TAMS at top (so existing objects are traversed) and
// cleared live data; regions outside get TAMS at bottom (everything there is
// implicitly live). Root regions additionally get their outbound matrix rows
// cleared and a safe scan limit. Only regions with committed bitmap slices
// are touched.
 331 void ShenandoahTraversalGC::prepare_regions() {
 332   ShenandoahHeap* heap = ShenandoahHeap::heap();
 333   size_t num_regions = heap->num_regions();
 334   ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
 335 
 336   for (size_t i = 0; i < num_regions; i++) {
 337     ShenandoahHeapRegion* region = heap->get_region(i);
 338     if (heap->is_bitmap_slice_committed(region)) {
 339       if (_traversal_set->is_in(i)) {
 340         heap->set_next_top_at_mark_start(region->bottom(), region->top());
 341         region->clear_live_data();
 342         assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 343       } else {
 344         // Everything outside the traversal set is always considered live.
 345         heap->set_next_top_at_mark_start(region->bottom(), region->bottom());
 346       }
 347       if (_root_regions->is_in(i)) {
 348         assert(!region->in_collection_set(), "roots must not overlap with cset");
 349         matrix->clear_region_outbound(i);
 350         // Since root region can be allocated at, we should bound the scans
 351         // in it at current top. Otherwise, one thread may evacuate objects
 352         // to that root region, while another would try to scan newly evac'ed
 353         // objects under the race.
 354         region->set_concurrent_iteration_safe_limit(region->top());
 355       }
 356     }
 357   }
 358 }
 359 
 360 void ShenandoahTraversalGC::prepare() {
 361   _heap->collection_set()->clear();
 362   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 363 
 364   _heap->make_tlabs_parsable(true);
 365 
 366   assert(_heap->is_next_bitmap_clear(), "need clean mark bitmap");
 367 
 368   ShenandoahFreeSet* free_set = _heap->free_set();
 369   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 370 
 371   // Find collection set
 372   _heap->shenandoahPolicy()->choose_collection_set(collection_set);
 373   prepare_regions();
 374 
 375   // Rebuild free set
 376   free_set->rebuild();
 377 
 378   log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions and "SIZE_FORMAT" root set regions", collection_set->count(), _root_regions->count());
 379 }
 380 
 381 void ShenandoahTraversalGC::init_traversal_collection() {
 382   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");
 383 
 384   if (ShenandoahVerify) {
 385     _heap->verifier()->verify_before_traversal();
 386   }
 387 
 388   {
 389     ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
 390     ShenandoahHeapLocker lock(_heap->lock());
 391     prepare();
 392   }
 393 
 394   _heap->set_concurrent_traversal_in_progress(true);
 395 
 396   bool process_refs = _heap->process_references();
 397   if (process_refs) {
 398     ReferenceProcessor* rp = _heap->ref_processor();


 415 
 416       if (UseShenandoahOWST) {
 417         ShenandoahTaskTerminator terminator(nworkers, task_queues());
 418         ShenandoahInitTraversalCollectionTask traversal_task(&rp);
 419         _heap->workers()->run_task(&traversal_task);
 420       } else {
 421         ParallelTaskTerminator terminator(nworkers, task_queues());
 422         ShenandoahInitTraversalCollectionTask traversal_task(&rp);
 423         _heap->workers()->run_task(&traversal_task);
 424       }
 425     }
 426 
 427 #if defined(COMPILER2) || INCLUDE_JVMCI
 428     DerivedPointerTable::update_pointers();
 429 #endif
 430   }
 431 
 432   if (ShenandoahPacing) {
 433     _heap->pacer()->setup_for_traversal();
 434   }
 435 
 436   _root_regions_iterator = _root_regions->iterator();
 437 }
 438 
 439 void ShenandoahTraversalGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator, bool do_satb) {
 440   if (do_satb) {
 441     main_loop_prework<true>(worker_id, terminator);
 442   } else {
 443     main_loop_prework<false>(worker_id, terminator);
 444   }
 445 }
 446 
 447 template <bool DO_SATB>
 448 void ShenandoahTraversalGC::main_loop_prework(uint w, ParallelTaskTerminator* t) {
 449   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
 450 
 451   // Initialize live data.
 452   jushort* ld = get_liveness(w);
 453   Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
 454 
 455   ReferenceProcessor* rp = NULL;
 456   if (_heap->process_references()) {
 457     rp = _heap->ref_processor();
 458   }
 459   if (UseShenandoahMatrix) {
 460     if (!_heap->is_degenerated_gc_in_progress()) {
 461       if (_heap->unload_classes()) {
 462         if (ShenandoahStringDedup::is_enabled()) {
 463           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 464           ShenandoahTraversalMetadataDedupMatrixClosure cl(q, rp, dq);
 465           main_loop_work<ShenandoahTraversalMetadataDedupMatrixClosure, DO_SATB>(&cl, ld, w, t);
 466         } else {
 467           ShenandoahTraversalMetadataMatrixClosure cl(q, rp);
 468           main_loop_work<ShenandoahTraversalMetadataMatrixClosure, DO_SATB>(&cl, ld, w, t);
 469         }
 470       } else {
 471         if (ShenandoahStringDedup::is_enabled()) {
 472           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 473           ShenandoahTraversalDedupMatrixClosure cl(q, rp, dq);
 474           main_loop_work<ShenandoahTraversalDedupMatrixClosure, DO_SATB>(&cl, ld, w, t);
 475         } else {
 476           ShenandoahTraversalMatrixClosure cl(q, rp);
 477           main_loop_work<ShenandoahTraversalMatrixClosure, DO_SATB>(&cl, ld, w, t);
 478         }
 479       }
 480     } else {
 481       if (_heap->unload_classes()) {
 482         if (ShenandoahStringDedup::is_enabled()) {
 483           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 484           ShenandoahTraversalMetadataDedupDegenMatrixClosure cl(q, rp, dq);
 485           main_loop_work<ShenandoahTraversalMetadataDedupDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 486         } else {
 487           ShenandoahTraversalMetadataDegenMatrixClosure cl(q, rp);
 488           main_loop_work<ShenandoahTraversalMetadataDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 489         }
 490       } else {
 491         if (ShenandoahStringDedup::is_enabled()) {
 492           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 493           ShenandoahTraversalDedupDegenMatrixClosure cl(q, rp, dq);
 494           main_loop_work<ShenandoahTraversalDedupDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 495         } else {
 496           ShenandoahTraversalDegenMatrixClosure cl(q, rp);
 497           main_loop_work<ShenandoahTraversalDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 498         }
 499       }
 500     }
 501   } else {
 502     if (!_heap->is_degenerated_gc_in_progress()) {
 503       if (_heap->unload_classes()) {
 504         if (ShenandoahStringDedup::is_enabled()) {
 505           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 506           ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq);
 507           main_loop_work<ShenandoahTraversalMetadataDedupClosure, DO_SATB>(&cl, ld, w, t);
 508         } else {
 509           ShenandoahTraversalMetadataClosure cl(q, rp);
 510           main_loop_work<ShenandoahTraversalMetadataClosure, DO_SATB>(&cl, ld, w, t);
 511         }
 512       } else {
 513         if (ShenandoahStringDedup::is_enabled()) {
 514           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 515           ShenandoahTraversalDedupClosure cl(q, rp, dq);
 516           main_loop_work<ShenandoahTraversalDedupClosure, DO_SATB>(&cl, ld, w, t);
 517         } else {
 518           ShenandoahTraversalClosure cl(q, rp);
 519           main_loop_work<ShenandoahTraversalClosure, DO_SATB>(&cl, ld, w, t);
 520         }
 521       }


 523       if (_heap->unload_classes()) {
 524         if (ShenandoahStringDedup::is_enabled()) {
 525           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 526           ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp, dq);
 527           main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
 528         } else {
 529           ShenandoahTraversalMetadataDegenClosure cl(q, rp);
 530           main_loop_work<ShenandoahTraversalMetadataDegenClosure, DO_SATB>(&cl, ld, w, t);
 531         }
 532       } else {
 533         if (ShenandoahStringDedup::is_enabled()) {
 534           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 535           ShenandoahTraversalDedupDegenClosure cl(q, rp, dq);
 536           main_loop_work<ShenandoahTraversalDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
 537         } else {
 538           ShenandoahTraversalDegenClosure cl(q, rp);
 539           main_loop_work<ShenandoahTraversalDegenClosure, DO_SATB>(&cl, ld, w, t);
 540         }
 541       }
 542     }
 543   }
 544   flush_liveness(w);
 545 
 546 }
 547 
// The traversal mark loop for one worker, in three steps:
//   1. Drain queues left over from a previous (paused/cancelled) phase.
//   2. Scan all root regions (TODO below: could be interleaved).
//   3. Run the regular work-stealing mark loop, optionally refilling from
//      SATB buffers (DO_SATB), until global termination.
// Bails out through the terminator as soon as GC cancellation is observed.
template <class T, bool DO_SATB>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();

  // Number of tasks processed between cancellation/termination checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_concgc_and_yield()) {
      // Cancelled: rendezvous with the other workers before returning.
      // Leave the evac-OOM scope while spinning in the terminator.
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task)) {
        conc_mark->do_task<T, true>(q, cl, live_data, &task);
      } else {
        // This queue is drained; move on to the next unclaimed one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Step 2: Process all root regions.
  // TODO: Interleave this in the normal mark loop below.
  ShenandoahHeapRegion* r = _root_regions_iterator.claim_next();
  while (r != NULL) {
    // NOTE(review): presumably bounded by the concurrent-iteration safe
    // limit set in prepare_regions(), making it safe against racing
    // evacuations into this region -- confirm in the iterator.
    _heap->marked_object_oop_safe_iterate(r, cl);
    if (ShenandoahPacing) {
      _heap->pacer()->report_partial(r->get_live_data_words());
    }
    if (check_and_handle_cancelled_gc(terminator)) return;
    r = _root_regions_iterator.claim_next();
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Normal loop.
  q = queues->queue(worker_id);
  ShenandoahTraversalSATBBufferClosure satb_cl(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  // Seed for random victim selection in work stealing.
  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator)) return;

    for (uint i = 0; i < stride; i++) {
      // Try, in order: own buffer, own queue, own overflow, then (when
      // DO_SATB) refill from a completed SATB buffer, then steal.
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task) ||
          (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) ||
          queues->steal(worker_id, &seed, task)) {
        conc_mark->do_task<T, true>(q, cl, live_data, &task);
      } else {
        // No work anywhere: offer termination outside the evac-OOM scope.
        ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
        if (terminator->offer_termination()) return;
      }
    }
  }
}
 621 
 622 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 623   if (_heap->cancelled_concgc()) {
 624     ShenandoahCancelledTerminatorTerminator tt;
 625     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 626     while (! terminator->offer_termination(&tt));
 627     return true;
 628   }
 629   return false;
 630 }
 631 
 632 void ShenandoahTraversalGC::concurrent_traversal_collection() {


 683 
 684   if (!_heap->cancelled_concgc() && _heap->process_references()) {
 685     weak_refs_work();
 686   }
 687 
 688   if (!_heap->cancelled_concgc() && _heap->unload_classes()) {
 689     _heap->unload_classes_and_cleanup_tables(false);
 690     fixup_roots();
 691   }
 692 
 693   if (!_heap->cancelled_concgc()) {
 694     // Still good? We can now trash the cset, and make final verification
 695     {
 696       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
 697       ShenandoahHeapLocker lock(_heap->lock());
 698 
 699       // Trash everything
 700       // Clear immediate garbage regions.
 701       size_t num_regions = _heap->num_regions();
 702 
 703       ShenandoahHeapRegionSet* traversal_regions = traversal_set();
 704       ShenandoahFreeSet* free_regions = _heap->free_set();
 705       free_regions->clear();
 706       for (size_t i = 0; i < num_regions; i++) {
 707         ShenandoahHeapRegion* r = _heap->get_region(i);
 708         bool not_allocated = _heap->next_top_at_mark_start(r->bottom()) == r->top();
 709 
 710         bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
 711         if (r->is_humongous_start() && candidate) {
 712           // Trash humongous.
 713           HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
 714           assert(!_heap->is_marked_next(oop(humongous_obj)), "must not be marked");
 715           r->make_trash();
 716           while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
 717             i++;
 718             r = _heap->get_region(i);
 719             assert(r->is_humongous_continuation(), "must be humongous continuation");
 720             r->make_trash();
 721           }
 722         } else if (!r->is_empty() && candidate) {
 723           // Trash regular.
 724           assert(!r->is_humongous(), "handled above");
 725           assert(!r->is_trash(), "must not already be trashed");
 726           r->make_trash();
 727         }
 728       }
 729       _heap->collection_set()->clear();
 730       _heap->free_set()->rebuild();
 731       reset();
 732     }
 733 
 734     if (ShenandoahVerify) {
 735       _heap->verifier()->verify_after_traversal();
 736     }
 737 
 738     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
 739     _heap->set_concurrent_traversal_in_progress(false);
 740     assert(!_heap->cancelled_concgc(), "must not be cancelled when getting out here");
 741   }
 742 }


 814 
// "Complete GC" closure handed to the ReferenceProcessor during precleaning:
// drains the traversal queues single-threaded on worker 0, with SATB
// draining disabled (do_satb == false).
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    // Single-threaded drain: terminator over exactly one worker.
    ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, false);
  }
};
 826 
 827 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 828 private:
 829   ShenandoahObjToScanQueue* _queue;
 830   Thread* _thread;
 831   ShenandoahTraversalGC* _traversal_gc;
 832   template <class T>
 833   inline void do_oop_nv(T* p) {
 834     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 835   }
 836 
 837 public:
 838   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 839     _queue(q), _thread(Thread::current()),
 840     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 841 
 842   void do_oop(narrowOop* p) { do_oop_nv(p); }
 843   void do_oop(oop* p)       { do_oop_nv(p); }
 844 };
 845 
 846 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 847 private:
 848   ShenandoahObjToScanQueue* _queue;
 849   Thread* _thread;
 850   ShenandoahTraversalGC* _traversal_gc;
 851   template <class T>
 852   inline void do_oop_nv(T* p) {
 853     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 854   }
 855 
 856 public:
 857   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 858     _queue(q), _thread(Thread::current()),
 859     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 860 
 861   void do_oop(narrowOop* p) { do_oop_nv(p); }
 862   void do_oop(oop* p)       { do_oop_nv(p); }
 863 };
 864 
 865 class ShenandoahTraversalKeepAliveUpdateMatrixClosure : public OopClosure {
 866 private:
 867   ShenandoahObjToScanQueue* _queue;
 868   Thread* _thread;
 869   ShenandoahTraversalGC* _traversal_gc;
 870   template <class T>
 871   inline void do_oop_nv(T* p) {
 872     // TODO: Need to somehow pass base_obj here?
 873     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 874   }
 875 
 876 public:
 877   ShenandoahTraversalKeepAliveUpdateMatrixClosure(ShenandoahObjToScanQueue* q) :
 878     _queue(q), _thread(Thread::current()),
 879     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 880 
 881   void do_oop(narrowOop* p) { do_oop_nv(p); }
 882   void do_oop(oop* p)       { do_oop_nv(p); }
 883 };
 884 
 885 class ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure : public OopClosure {
 886 private:
 887   ShenandoahObjToScanQueue* _queue;
 888   Thread* _thread;
 889   ShenandoahTraversalGC* _traversal_gc;
 890   template <class T>
 891   inline void do_oop_nv(T* p) {
 892     // TODO: Need to somehow pass base_obj here?
 893     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 894   }
 895 
 896 public:
 897   ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure(ShenandoahObjToScanQueue* q) :
 898     _queue(q), _thread(Thread::current()),
 899     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 900 
 901   void do_oop(narrowOop* p) { do_oop_nv(p); }
 902   void do_oop(oop* p)       { do_oop_nv(p); }
 903 };
 904 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning runs single-threaded; disable MT discovery for this scope
  // (RAII mutator restores the previous setting on exit).
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  // Scoped is-alive closure installation; restored on exit.
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  // Interrupt on cancelled GC
  ShenandoahTraversalCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");
  assert(!sh->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // All closures share queue 0, matching the single-threaded drain done by
  // ShenandoahTraversalPrecleanCompleteGCClosure.
  ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
  ShenandoahForwardedIsAliveClosure is_alive;
  if (UseShenandoahMatrix) {
    ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  } else {
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  }
  assert(!sh->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
}
 951 
 952 // Weak Reference Closures
 953 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 954   uint _worker_id;
 955   ParallelTaskTerminator* _terminator;
 956   bool _reset_terminator;
 957 
 958 public:
 959   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 960     _worker_id(worker_id),
 961     _terminator(t),
 962     _reset_terminator(reset_terminator) {
 963   }
 964 
 965   void do_void() {
 966     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 967 
 968     ShenandoahHeap* sh = ShenandoahHeap::heap();


1003 
1004 private:
1005   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1006   ParallelTaskTerminator* _terminator;
1007 public:
1008 
1009   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1010                              ParallelTaskTerminator* t) :
1011     AbstractGangTask("Process reference objects in parallel"),
1012     _proc_task(proc_task),
1013     _terminator(t) {
1014   }
1015 
1016   void work(uint worker_id) {
1017     ShenandoahEvacOOMScope oom_evac_scope;
1018     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1019     ShenandoahHeap* heap = ShenandoahHeap::heap();
1020     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1021 
1022     ShenandoahForwardedIsAliveClosure is_alive;
1023     if (UseShenandoahMatrix) {
1024       if (!heap->is_degenerated_gc_in_progress()) {
1025         ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1026         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1027       } else {
1028         ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1029         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1030       }
1031     } else {
1032       if (!heap->is_degenerated_gc_in_progress()) {
1033         ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1034         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1035       } else {
1036         ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1037         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1038       }
1039     }
1040   }
1041 };
1042 
1043 class ShenandoahTraversalRefEnqueueTaskProxy : public AbstractGangTask {
1044 
1045 private:
1046   AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
1047 
1048 public:
1049 
1050   ShenandoahTraversalRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
1051     AbstractGangTask("Enqueue reference objects in parallel"),
1052     _enqueue_task(enqueue_task) {
1053   }
1054 
1055   void work(uint worker_id) {
1056     _enqueue_task.work(worker_id);
1057   }
1058 };
1059 
1060 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {


1121   rp->set_active_mt_degree(nworkers);
1122 
1123   assert(task_queues()->is_empty(), "Should be empty");
1124 
1125   // complete_gc and keep_alive closures instantiated here are only needed for
1126   // single-threaded path in RP. They share the queue 0 for tracking work, which
1127   // simplifies implementation. Since RP may decide to call complete_gc several
1128   // times, we need to be able to reuse the terminator.
1129   uint serial_worker_id = 0;
1130   ParallelTaskTerminator terminator(1, task_queues());
1131   ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1132 
1133   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1134 
1135   ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_q());
1136 
1137   {
1138     ShenandoahGCPhase phase(phase_process);
1139 
1140     ShenandoahForwardedIsAliveClosure is_alive;
1141     if (UseShenandoahMatrix) {
1142       if (!_heap->is_degenerated_gc_in_progress()) {
1143         ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
1144         rp->process_discovered_references(&is_alive, &keep_alive,
1145                                           &complete_gc, &executor,
1146                                           &pt);
1147         pt.print_all_references();
1148         WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
1149       } else {
1150         ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
1151         rp->process_discovered_references(&is_alive, &keep_alive,
1152                                           &complete_gc, &executor,
1153                                           &pt);
1154         pt.print_all_references();
1155         WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
1156       }
1157     } else {
1158       if (!_heap->is_degenerated_gc_in_progress()) {
1159         ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
1160         rp->process_discovered_references(&is_alive, &keep_alive,
1161                                           &complete_gc, &executor,
1162                                           &pt);
1163         pt.print_all_references();

1164         WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
1165       } else {
1166         ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
1167         rp->process_discovered_references(&is_alive, &keep_alive,
1168                                           &complete_gc, &executor,
1169                                           &pt);
1170         pt.print_all_references();
1171         WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
1172       }
1173     }
1174 
1175     assert(!_heap->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
1176   }
1177 
1178   if (_heap->cancelled_concgc()) return;
1179 
1180   {
1181     ShenandoahGCPhase phase(phase_enqueue);
1182     rp->enqueue_discovered_references(&executor, &pt);
1183     pt.print_enqueue_phase();
1184   }
1185 }
< prev index next >