
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp

rev 52753 : [backport] 8221435: Shenandoah should not mark through weak roots
Reviewed-by: rkennke, shade
rev 52754 : [backport] 8221629: Shenandoah: Cleanup class unloading logic
Reviewed-by: rkennke


  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 
  30 #include "gc/shared/weakProcessor.hpp"
  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/referenceProcessor.hpp"
  33 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  34 
  35 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  36 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  39 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  43 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  44 #include "gc/shenandoah/shenandoahUtils.hpp"
  45 #include "gc/shared/weakProcessor.hpp"
  46 
  47 #include "memory/iterator.inline.hpp"
  48 #include "memory/metaspace.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "oops/oop.inline.hpp"
  51 
  52 template<UpdateRefsMode UPDATE_REFS>
  53 class ShenandoahInitMarkRootsClosure : public OopClosure {
  54 private:
  55   ShenandoahObjToScanQueue* _queue;
  56   ShenandoahHeap* _heap;
  57   ShenandoahMarkingContext* const _mark_context;
  58 
  59   template <class T>
  60   inline void do_oop_work(T* p) {


 104     do_work(heap, &mark_cl, worker_id);
 105   }
 106 
 107 private:
 108   void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
 109     // The rationale for selecting the roots to scan is as follows:
 110     //   a. With unload_classes = true, we only want to scan the actual strong roots from the
 111     //      code cache. This will allow us to identify the dead classes, unload them, *and*
 112     //      invalidate the relevant code cache blobs. This can only be done together with
 113     //      class unloading.
 114     //   b. With unload_classes = false, we have to nominally retain all the references from code
 115     //      cache, because the generated code may embed classes/oops that we will never
 116     //      visit during mark. Without code cache invalidation, as in (a),
 117     //      we risk executing that code cache blob, and crashing.
 118     //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
 119     //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
 120     //      pause time.
 121 
 122     CLDToOopClosure clds_cl(oops);
 123     MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
 124     OopClosure* weak_oops = _process_refs ? NULL : oops;
 125 
 126     ResourceMark m;
 127     if (heap->unload_classes()) {
 128       _rp->process_strong_roots(oops, weak_oops, &clds_cl, NULL, &blobs_cl, NULL, worker_id);
 129     } else {
 130       if (ShenandoahConcurrentScanCodeRoots) {
 131         CodeBlobClosure* code_blobs = NULL;
 132 #ifdef ASSERT
 133         ShenandoahAssertToSpaceClosure assert_to_space_oops;
 134         CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
 135         // If conc code cache evac is disabled, code cache should have only to-space ptrs.
 136         // Otherwise, it should have to-space ptrs only if mark does not update refs.
 137         if (!heap->has_forwarded_objects()) {
 138           code_blobs = &assert_to_space;
 139         }
 140 #endif
 141         _rp->process_all_roots(oops, weak_oops, &clds_cl, code_blobs, NULL, worker_id);
 142       } else {
 143         _rp->process_all_roots(oops, weak_oops, &clds_cl, &blobs_cl, NULL, worker_id);
 144       }
 145     }
 146   }
 147 };
 148 
 149 class ShenandoahUpdateRootsTask : public AbstractGangTask {
 150 private:
 151   ShenandoahRootProcessor* _rp;
 152   const bool _update_code_cache;
 153 public:
 154   ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
 155     AbstractGangTask("Shenandoah update roots task"),
 156     _rp(rp),
 157     _update_code_cache(update_code_cache) {
 158   }
 159 
 160   void work(uint worker_id) {
 161     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 162     ShenandoahParallelWorkerSession worker_session(worker_id);
 163 
 164     ShenandoahHeap* heap = ShenandoahHeap::heap();
 165     ShenandoahUpdateRefsClosure cl;
 166     CLDToOopClosure cldCl(&cl);
 167 
 168     CodeBlobClosure* code_blobs;
 169     CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
 170 #ifdef ASSERT
 171     ShenandoahAssertToSpaceClosure assert_to_space_oops;
 172     CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
 173 #endif
 174     if (_update_code_cache) {
 175       code_blobs = &update_blobs;
 176     } else {
 177       code_blobs =
 178         DEBUG_ONLY(&assert_to_space)
 179         NOT_DEBUG(NULL);
 180     }
 181     _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
 182   }
 183 };
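The DEBUG_ONLY/NOT_DEBUG pair used in the code_blobs selection above comes from utilities/macros.hpp: under ASSERT builds DEBUG_ONLY(code) expands to code and NOT_DEBUG(code) to nothing, and the reverse in product builds. A self-contained toy sketch of the pattern (the macro re-definitions and strings here are illustrative, not HotSpot code):

  #include <cstdio>

  // Toy re-creation of the HotSpot selection macros (utilities/macros.hpp).
  #ifdef ASSERT
  #define DEBUG_ONLY(code) code
  #define NOT_DEBUG(code)
  #else
  #define DEBUG_ONLY(code)
  #define NOT_DEBUG(code) code
  #endif

  int main() {
    // Mirrors the code_blobs selection in ShenandoahUpdateRootsTask::work:
    // debug builds install the to-space-asserting closure, product builds
    // skip the code cache by passing NULL.
    const char* code_blobs =
      DEBUG_ONLY("&assert_to_space")
      NOT_DEBUG("NULL");
    printf("code_blobs = %s\n", code_blobs);
    return 0;
  }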
 184 
 185 class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
 186 private:
 187   ShenandoahConcurrentMark* _cm;
 188   ShenandoahTaskTerminator* _terminator;
 189 
 190 public:
 191   ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
 192     AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
 193   }
 194 
 195   void work(uint worker_id) {
 196     ShenandoahHeap* heap = ShenandoahHeap::heap();
 197     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 198     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 199     ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
 200     ReferenceProcessor* rp;
 201     if (heap->process_references()) {


 439     ShenandoahIsAliveSelector is_alive;
 440     ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
 441 
 442     ShenandoahTerminationTracker termination_tracker(full_gc ?
 443                                                      ShenandoahPhaseTimings::full_gc_mark_termination :
 444                                                      ShenandoahPhaseTimings::termination);
 445 
 446     StrongRootsScope scope(nworkers);
 447     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 448     ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
 449     _heap->workers()->run_task(&task);
 450   }
 451 
 452   assert(task_queues()->is_empty(), "Should be empty");
 453 
 454   // When we're done marking everything, we process weak references.
 455   if (_heap->process_references()) {
 456     weak_refs_work(full_gc);
 457   }
 458 


 459   // And finally finish class unloading
 460   if (_heap->unload_classes()) {
 461     _heap->unload_classes_and_cleanup_tables(full_gc);
 462   }
 463 




 464   assert(task_queues()->is_empty(), "Should be empty");
 465   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
 466   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
 467 
 468   // Resize Metaspace
 469   MetaspaceGC::compute_new_size();
 470 }
 471 
 472 // Weak Reference Closures
 473 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
 474   uint _worker_id;
 475   ShenandoahTaskTerminator* _terminator;
 476   bool _reset_terminator;
 477 
 478 public:
 479   ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 480     _worker_id(worker_id),
 481     _terminator(t),
 482     _reset_terminator(reset_terminator) {
 483   }


 548 private:
 549   ShenandoahHeap* const _heap;
 550 
 551   template <class T>
 552   inline void do_oop_work(T* p) {
 553     oop o = _heap->maybe_update_with_forwarded(p);
 554     shenandoah_assert_marked_except(p, o, o == NULL);
 555   }
 556 
 557 public:
 558   ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}
 559 
 560   void do_oop(narrowOop* p) { do_oop_work(p); }
 561   void do_oop(oop* p)       { do_oop_work(p); }
 562 };
 563 
 564 class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
 565 private:
 566   template <class T>
 567   inline void do_oop_work(T* p) {

 568     T o = RawAccess<>::oop_load(p);
 569     if (!CompressedOops::is_null(o)) {
 570       oop obj = CompressedOops::decode_not_null(o);
 571       shenandoah_assert_not_forwarded(p, obj);
 572     }

 573   }
 574 
 575 public:
 576   ShenandoahWeakAssertNotForwardedClosure() {}
 577 
 578   void do_oop(narrowOop* p) { do_oop_work(p); }
 579   void do_oop(oop* p)       { do_oop_work(p); }
 580 };
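A minimal usage sketch for this closure, mirroring the call made in weak_refs_work_doit below; it is not self-contained (ShenandoahIsAliveSelector and WeakProcessor are the surrounding HotSpot types):

  ShenandoahIsAliveSelector is_alive;
  ShenandoahWeakAssertNotForwardedClosure cl;
  // Visit every weak root; in the non-forwarding case each one must already
  // point to its final location, which the closure asserts.
  WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);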
 581 
 582 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
 583 private:
 584   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 585   ShenandoahTaskTerminator* _terminator;
 586 
 587 public:
 588   ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 589                              ShenandoahTaskTerminator* t) :
 590     AbstractGangTask("Process reference objects in parallel"),
 591     _proc_task(proc_task),
 592     _terminator(t) {


 641 
 642   ShenandoahPhaseTimings::Phase phase_root =
 643           full_gc ?
 644           ShenandoahPhaseTimings::full_gc_weakrefs :
 645           ShenandoahPhaseTimings::weakrefs;
 646 
 647   ShenandoahGCPhase phase(phase_root);
 648 
 649   ReferenceProcessor* rp = _heap->ref_processor();
 650 
 651   // NOTE: We cannot shortcut on has_discovered_references() here, because
 652   // we would then miss marking JNI weak refs; see the implementation in
 653   // ReferenceProcessor::process_discovered_references.
 654   weak_refs_work_doit(full_gc);
 655 
 656   rp->verify_no_references_recorded();
 657   assert(!rp->discovery_enabled(), "Post condition");
 658 
 659 }
 660 
















 661 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
 662   ReferenceProcessor* rp = _heap->ref_processor();
 663 
 664   ShenandoahPhaseTimings::Phase phase_process =
 665           full_gc ?
 666           ShenandoahPhaseTimings::full_gc_weakrefs_process :
 667           ShenandoahPhaseTimings::weakrefs_process;
 668 
 669   ShenandoahPhaseTimings::Phase phase_process_termination =
 670           full_gc ?
 671           ShenandoahPhaseTimings::full_gc_weakrefs_termination :
 672           ShenandoahPhaseTimings::weakrefs_termination;
 673 
 674   shenandoah_assert_rp_isalive_not_installed();
 675   ShenandoahIsAliveSelector is_alive;
 676   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
 677 
 678   WorkGang* workers = _heap->workers();
 679   uint nworkers = workers->active_workers();
 680 


 682   rp->set_active_mt_degree(nworkers);
 683 
 684   assert(task_queues()->is_empty(), "Should be empty");
 685 
 686   // complete_gc and keep_alive closures instantiated here are only needed for
 687   // the single-threaded path in RP. They share queue 0 for tracking work, which
 688   // simplifies implementation. Since RP may decide to call complete_gc several
 689   // times, we need to be able to reuse the terminator.
 690   uint serial_worker_id = 0;
 691   ShenandoahTaskTerminator terminator(1, task_queues());
 692   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 693 
 694   ShenandoahRefProcTaskExecutor executor(workers);
 695 
 696   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
 697 
 698   {
 699     ShenandoahGCPhase phase(phase_process);
 700     ShenandoahTerminationTracker phase_term(phase_process_termination);
 701 
 702     // Process leftover weak oops: update them if needed, or assert that they do not
 703     // need updating otherwise. This JDK version does not have parallel WeakProcessor.
 704     // Weak processor API requires us to visit the oops, even if we are not doing
 705     // anything to them.
 706     if (_heap->has_forwarded_objects()) {
 707       ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
 708       rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
 709                                         &complete_gc, &executor,
 710                                         &pt);
 711 
 712       ShenandoahWeakUpdateClosure cl;
 713       WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
 714     } else {
 715       ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
 716       rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
 717                                         &complete_gc, &executor,
 718                                         &pt);
 719 
 720       ShenandoahWeakAssertNotForwardedClosure cl;
 721       WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
 722     }
 723 
 724     pt.print_all_references();
 725 
 726     assert(task_queues()->is_empty(), "Should be empty");
 727   }
 728 }
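To restate the single-threaded fallback that the comment on lines 686-689 above describes, schematically (a sketch of the interaction, not ReferenceProcessor's actual code):

  // keep_alive pushes follow-up marking work onto queue 0;
  // complete_gc drains it through the 1-thread terminator.
  // RP may interleave these several times per phase:
  //
  //   for each discovered reference that stays reachable:
  //     keep_alive->do_oop(referent_addr);   // enqueue on queue 0
  //   complete_gc->do_void();                // drain queue 0
  //   ... possibly more discovery ...
  //   complete_gc->do_void();                // hence reset_terminator = true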
 729 
 730 class ShenandoahCancelledGCYieldClosure : public YieldClosure {
 731 private:
 732   ShenandoahHeap* const _heap;
 733 public:
 734   ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
 735   virtual bool should_return() { return _heap->cancelled_gc(); }
 736 };
 737 
 738 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
 739 public:
 740   void do_void() {
 741     ShenandoahHeap* sh = ShenandoahHeap::heap();




  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 
  30 #include "gc/shared/weakProcessor.hpp"
  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/referenceProcessor.hpp"
  33 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  34 
  35 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  36 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  39 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  43 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  44 #include "gc/shenandoah/shenandoahUtils.hpp"
  45 #include "gc/shared/weakProcessor.hpp"
  46 
  47 #include "memory/iterator.inline.hpp"
  48 #include "memory/metaspace.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "oops/oop.inline.hpp"
  51 
  52 template<UpdateRefsMode UPDATE_REFS>
  53 class ShenandoahInitMarkRootsClosure : public OopClosure {
  54 private:
  55   ShenandoahObjToScanQueue* _queue;
  56   ShenandoahHeap* _heap;
  57   ShenandoahMarkingContext* const _mark_context;
  58 
  59   template <class T>
  60   inline void do_oop_work(T* p) {


 104     do_work(heap, &mark_cl, worker_id);
 105   }
 106 
 107 private:
 108   void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
 109     // The rationale for selecting the roots to scan is as follows:
 110     //   a. With unload_classes = true, we only want to scan the actual strong roots from the
 111     //      code cache. This will allow us to identify the dead classes, unload them, *and*
 112     //      invalidate the relevant code cache blobs. This can only be done together with
 113     //      class unloading.
 114     //   b. With unload_classes = false, we have to nominally retain all the references from code
 115     //      cache, because the generated code may embed classes/oops that we will never
 116     //      visit during mark. Without code cache invalidation, as in (a),
 117     //      we risk executing that code cache blob, and crashing.
 118     //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
 119     //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
 120     //      pause time.
 121 
 122     CLDToOopClosure clds_cl(oops);
 123     MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);

 124 
 125     ResourceMark m;
 126     if (heap->unload_classes()) {
 127       _rp->process_strong_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
 128     } else {
 129       if (ShenandoahConcurrentScanCodeRoots) {
 130         CodeBlobClosure* code_blobs = NULL;
 131 #ifdef ASSERT
 132         ShenandoahAssertToSpaceClosure assert_to_space_oops;
 133         CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
 134         // If conc code cache evac is disabled, code cache should have only to-space ptrs.
 135         // Otherwise, it should have to-space ptrs only if mark does not update refs.
 136         if (!heap->has_forwarded_objects()) {
 137           code_blobs = &assert_to_space;
 138         }
 139 #endif
 140         _rp->process_all_roots(oops, &clds_cl, code_blobs, NULL, worker_id);
 141       } else {
 142         _rp->process_all_roots(oops, &clds_cl, &blobs_cl, NULL, worker_id);
 143       }
 144     }
 145   }
 146 };
 147 
 148 class ShenandoahUpdateRootsTask : public AbstractGangTask {
 149 private:
 150   ShenandoahRootProcessor* _rp;
 151   const bool _update_code_cache;
 152 public:
 153   ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
 154     AbstractGangTask("Shenandoah update roots task"),
 155     _rp(rp),
 156     _update_code_cache(update_code_cache) {
 157   }
 158 
 159   void work(uint worker_id) {
 160     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 161     ShenandoahParallelWorkerSession worker_session(worker_id);
 162 
 163     ShenandoahHeap* heap = ShenandoahHeap::heap();
 164     ShenandoahUpdateRefsClosure cl;
 165     CLDToOopClosure cldCl(&cl);
 166 
 167     CodeBlobClosure* code_blobs;
 168     CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
 169 #ifdef ASSERT
 170     ShenandoahAssertToSpaceClosure assert_to_space_oops;
 171     CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
 172 #endif
 173     if (_update_code_cache) {
 174       code_blobs = &update_blobs;
 175     } else {
 176       code_blobs =
 177         DEBUG_ONLY(&assert_to_space)
 178         NOT_DEBUG(NULL);
 179     }
 180     _rp->update_all_roots<AlwaysTrueClosure>(&cl, &cldCl, code_blobs, NULL, worker_id);
 181   }
 182 };
 183 
 184 class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
 185 private:
 186   ShenandoahConcurrentMark* _cm;
 187   ShenandoahTaskTerminator* _terminator;
 188 
 189 public:
 190   ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
 191     AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
 192   }
 193 
 194   void work(uint worker_id) {
 195     ShenandoahHeap* heap = ShenandoahHeap::heap();
 196     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 197     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 198     ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
 199     ReferenceProcessor* rp;
 200     if (heap->process_references()) {


 438     ShenandoahIsAliveSelector is_alive;
 439     ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());
 440 
 441     ShenandoahTerminationTracker termination_tracker(full_gc ?
 442                                                      ShenandoahPhaseTimings::full_gc_mark_termination :
 443                                                      ShenandoahPhaseTimings::termination);
 444 
 445     StrongRootsScope scope(nworkers);
 446     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 447     ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
 448     _heap->workers()->run_task(&task);
 449   }
 450 
 451   assert(task_queues()->is_empty(), "Should be empty");
 452 
 453   // When we're done marking everything, we process weak references.
 454   if (_heap->process_references()) {
 455     weak_refs_work(full_gc);
 456   }
 457 
 458   weak_roots_work();
 459 
 460   // And finally finish class unloading
 461   if (_heap->unload_classes()) {
 462     _heap->unload_classes_and_cleanup_tables(full_gc);
 463   }
 464   if (ShenandoahStringDedup::is_enabled()) {
 465     ShenandoahIsAliveSelector alive;
 466     BoolObjectClosure* is_alive = alive.is_alive_closure();
 467     ShenandoahStringDedup::unlink_or_oops_do(is_alive, NULL, false);
 468   }
 469   assert(task_queues()->is_empty(), "Should be empty");
 470   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
 471   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
 472 
 473   // Resize Metaspace
 474   MetaspaceGC::compute_new_size();
 475 }
 476 
 477 // Weak Reference Closures
 478 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
 479   uint _worker_id;
 480   ShenandoahTaskTerminator* _terminator;
 481   bool _reset_terminator;
 482 
 483 public:
 484   ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 485     _worker_id(worker_id),
 486     _terminator(t),
 487     _reset_terminator(reset_terminator) {
 488   }


 553 private:
 554   ShenandoahHeap* const _heap;
 555 
 556   template <class T>
 557   inline void do_oop_work(T* p) {
 558     oop o = _heap->maybe_update_with_forwarded(p);
 559     shenandoah_assert_marked_except(p, o, o == NULL);
 560   }
 561 
 562 public:
 563   ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}
 564 
 565   void do_oop(narrowOop* p) { do_oop_work(p); }
 566   void do_oop(oop* p)       { do_oop_work(p); }
 567 };
 568 
 569 class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
 570 private:
 571   template <class T>
 572   inline void do_oop_work(T* p) {
 573 #ifdef ASSERT
 574     T o = RawAccess<>::oop_load(p);
 575     if (!CompressedOops::is_null(o)) {
 576       oop obj = CompressedOops::decode_not_null(o);
 577       shenandoah_assert_not_forwarded(p, obj);
 578     }
 579 #endif
 580   }
 581 
 582 public:
 583   ShenandoahWeakAssertNotForwardedClosure() {}
 584 
 585   void do_oop(narrowOop* p) { do_oop_work(p); }
 586   void do_oop(oop* p)       { do_oop_work(p); }
 587 };
 588 
 589 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
 590 private:
 591   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 592   ShenandoahTaskTerminator* _terminator;
 593 
 594 public:
 595   ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 596                              ShenandoahTaskTerminator* t) :
 597     AbstractGangTask("Process reference objects in parallel"),
 598     _proc_task(proc_task),
 599     _terminator(t) {


 648 
 649   ShenandoahPhaseTimings::Phase phase_root =
 650           full_gc ?
 651           ShenandoahPhaseTimings::full_gc_weakrefs :
 652           ShenandoahPhaseTimings::weakrefs;
 653 
 654   ShenandoahGCPhase phase(phase_root);
 655 
 656   ReferenceProcessor* rp = _heap->ref_processor();
 657 
 658   // NOTE: We cannot shortcut on has_discovered_references() here, because
 659   // we would then miss marking JNI weak refs; see the implementation in
 660   // ReferenceProcessor::process_discovered_references.
 661   weak_refs_work_doit(full_gc);
 662 
 663   rp->verify_no_references_recorded();
 664   assert(!rp->discovery_enabled(), "Post condition");
 665 
 666 }
 667 
 668 // Process leftover weak oops: update them if needed, or assert that they do not
 669 // need updating otherwise.
 670 // Weak processor API requires us to visit the oops, even if we are not doing
 671 // anything to them.
 672 void ShenandoahConcurrentMark::weak_roots_work() {
 673   ShenandoahIsAliveSelector is_alive;
 674 
 675   if (_heap->has_forwarded_objects()) {
 676     ShenandoahWeakUpdateClosure cl;
 677     WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
 678   } else {
 679     ShenandoahWeakAssertNotForwardedClosure cl;
 680     WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
 681   }
 682 }
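For reference, the final-mark cleanup sequence that calls this new method (from the hunk earlier on this page, lines 454-468) now reads, condensed:

  // if (_heap->process_references())  weak_refs_work(full_gc);
  // weak_roots_work();                // unconditional: update or assert weak oops
  // if (_heap->unload_classes())      _heap->unload_classes_and_cleanup_tables(full_gc);
  // if (ShenandoahStringDedup::is_enabled())
  //   ShenandoahStringDedup::unlink_or_oops_do(is_alive, NULL, false);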
 683 
 684 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
 685   ReferenceProcessor* rp = _heap->ref_processor();
 686 
 687   ShenandoahPhaseTimings::Phase phase_process =
 688           full_gc ?
 689           ShenandoahPhaseTimings::full_gc_weakrefs_process :
 690           ShenandoahPhaseTimings::weakrefs_process;
 691 
 692   ShenandoahPhaseTimings::Phase phase_process_termination =
 693           full_gc ?
 694           ShenandoahPhaseTimings::full_gc_weakrefs_termination :
 695           ShenandoahPhaseTimings::weakrefs_termination;
 696 
 697   shenandoah_assert_rp_isalive_not_installed();
 698   ShenandoahIsAliveSelector is_alive;
 699   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
 700 
 701   WorkGang* workers = _heap->workers();
 702   uint nworkers = workers->active_workers();
 703 


 705   rp->set_active_mt_degree(nworkers);
 706 
 707   assert(task_queues()->is_empty(), "Should be empty");
 708 
 709   // complete_gc and keep_alive closures instantiated here are only needed for
 710   // the single-threaded path in RP. They share queue 0 for tracking work, which
 711   // simplifies implementation. Since RP may decide to call complete_gc several
 712   // times, we need to be able to reuse the terminator.
 713   uint serial_worker_id = 0;
 714   ShenandoahTaskTerminator terminator(1, task_queues());
 715   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 716 
 717   ShenandoahRefProcTaskExecutor executor(workers);
 718 
 719   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
 720 
 721   {
 722     ShenandoahGCPhase phase(phase_process);
 723     ShenandoahTerminationTracker phase_term(phase_process_termination);
 724 




 725     if (_heap->has_forwarded_objects()) {
 726       ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
 727       rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
 728                                         &complete_gc, &executor,
 729                                         &pt);
 730 


 731     } else {
 732       ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
 733       rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
 734                                         &complete_gc, &executor,
 735                                         &pt);
 736 


 737     }
 738 
 739     pt.print_all_references();
 740 
 741     assert(task_queues()->is_empty(), "Should be empty");
 742   }
 743 }
 744 
 745 class ShenandoahCancelledGCYieldClosure : public YieldClosure {
 746 private:
 747   ShenandoahHeap* const _heap;
 748 public:
 749   ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
 750   virtual bool should_return() { return _heap->cancelled_gc(); }
 751 };
 752 
 753 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
 754 public:
 755   void do_void() {
 756     ShenandoahHeap* sh = ShenandoahHeap::heap();

