< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp

Print this page
rev 54221 : 8221278: Shenandoah should not enqueue string dedup candidates during root scan


  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/referenceProcessor.hpp"
  33 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  34 
  35 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  37 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  40 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  41 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  42 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  43 #include "gc/shenandoah/shenandoahUtils.hpp"
  44 
  45 #include "memory/iterator.inline.hpp"
  46 #include "memory/metaspace.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 
  51 template<UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
  52 class ShenandoahInitMarkRootsClosure : public OopClosure {
  53 private:
  54   ShenandoahObjToScanQueue* _queue;
  55   ShenandoahHeap* _heap;
  56   ShenandoahMarkingContext* const _mark_context;
  57 
  58   template <class T>
  59   inline void do_oop_work(T* p) {
  60     ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
  61   }
  62 
  63 public:
  64   ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
  65     _queue(q),
  66     _heap(ShenandoahHeap::heap()),
  67     _mark_context(_heap->marking_context()) {};
  68 
  69   void do_oop(narrowOop* p) { do_oop_work(p); }
  70   void do_oop(oop* p)       { do_oop_work(p); }
  71 };
  72 
// Shared base-class constructor for the mark-refs closure family.
// Hands the reference processor to MetadataVisitingOopIterateClosure
// (so discovered references are routed through it), and caches the
// heap plus its current marking context for use while marking.
  73 ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  74   MetadataVisitingOopIterateClosure(rp),
  75   _queue(q),
  76   _heap(ShenandoahHeap::heap()),
  77   _mark_context(_heap->marking_context())
  78 { }
  79 
  80 template<UpdateRefsMode UPDATE_REFS>


  82 private:
  83   ShenandoahRootProcessor* _rp;
  84   bool _process_refs;
  85 public:
  86   ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
  87     AbstractGangTask("Shenandoah init mark roots task"),
  88     _rp(rp),
  89     _process_refs(process_refs) {
  90   }
  91 
  92   void work(uint worker_id) {
  93     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  94     ShenandoahParallelWorkerSession worker_session(worker_id);
  95 
  96     ShenandoahHeap* heap = ShenandoahHeap::heap();
  97     ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
  98     assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
  99 
 100     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 101 
 102     if (ShenandoahStringDedup::is_enabled()) {
 103       ShenandoahInitMarkRootsClosure<UPDATE_REFS, ENQUEUE_DEDUP> mark_cl(q);
 104       do_work(heap, &mark_cl, worker_id);
 105     } else {
 106       ShenandoahInitMarkRootsClosure<UPDATE_REFS, NO_DEDUP> mark_cl(q);
 107       do_work(heap, &mark_cl, worker_id);
 108     }
 109   }
 110 
 111 private:
 112   void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
 113     // The rationale for selecting the roots to scan is as follows:
 114     //   a. With unload_classes = true, we only want to scan the actual strong roots from the
 115     //      code cache. This will allow us to identify the dead classes, unload them, *and*
 116     //      invalidate the relevant code cache blobs. This could be only done together with
 117     //      class unloading.
 118     //   b. With unload_classes = false, we have to nominally retain all the references from code
 119     //      cache, because there could be the case of embedded class/oop in the generated code,
 120     //      which we will never visit during mark. Without code cache invalidation, as in (a),
 121     //      we risk executing that code cache blob, and crashing.
 122     //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
 123     //      and instead do that in concurrent phase under the relevant lock. This saves init mark
 124     //      pause time.
 125 
 126     CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
 127     MarkingCodeBlobClosure blobs_cl(oops, ! CodeBlobToOopClosure::FixRelocations);
 128     OopClosure* weak_oops = _process_refs ? NULL : oops;




  31 #include "gc/shared/gcTimer.hpp"
  32 #include "gc/shared/referenceProcessor.hpp"
  33 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  34 
  35 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  37 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  40 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  41 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  42 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  43 #include "gc/shenandoah/shenandoahUtils.hpp"
  44 
  45 #include "memory/iterator.inline.hpp"
  46 #include "memory/metaspace.hpp"
  47 #include "memory/resourceArea.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "runtime/handles.inline.hpp"
  50 
  51 template<UpdateRefsMode UPDATE_REFS>
  52 class ShenandoahInitMarkRootsClosure : public OopClosure {
  53 private:
  54   ShenandoahObjToScanQueue* _queue;
  55   ShenandoahHeap* _heap;
  56   ShenandoahMarkingContext* const _mark_context;
  57 
  58   template <class T>
  59   inline void do_oop_work(T* p) {
  60     ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  61   }
  62 
  63 public:
  64   ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
  65     _queue(q),
  66     _heap(ShenandoahHeap::heap()),
  67     _mark_context(_heap->marking_context()) {};
  68 
  69   void do_oop(narrowOop* p) { do_oop_work(p); }
  70   void do_oop(oop* p)       { do_oop_work(p); }
  71 };
  72 
// Shared base-class constructor for the mark-refs closure family.
// Hands the reference processor to MetadataVisitingOopIterateClosure
// (so discovered references are routed through it), and caches the
// heap plus its current marking context for use while marking.
  73 ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  74   MetadataVisitingOopIterateClosure(rp),
  75   _queue(q),
  76   _heap(ShenandoahHeap::heap()),
  77   _mark_context(_heap->marking_context())
  78 { }
  79 
  80 template<UpdateRefsMode UPDATE_REFS>


  82 private:
  83   ShenandoahRootProcessor* _rp;
  84   bool _process_refs;
  85 public:
  86   ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
  87     AbstractGangTask("Shenandoah init mark roots task"),
  88     _rp(rp),
  89     _process_refs(process_refs) {
  90   }
  91 
  92   void work(uint worker_id) {
  93     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  94     ShenandoahParallelWorkerSession worker_session(worker_id);
  95 
  96     ShenandoahHeap* heap = ShenandoahHeap::heap();
  97     ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
  98     assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);
  99 
 100     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 101 
 102     ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);




 103     do_work(heap, &mark_cl, worker_id);

 104   }
 105 
 106 private:
 107   void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
 108     // The rationale for selecting the roots to scan is as follows:
 109     //   a. With unload_classes = true, we only want to scan the actual strong roots from the
 110     //      code cache. This will allow us to identify the dead classes, unload them, *and*
 111     //      invalidate the relevant code cache blobs. This could be only done together with
 112     //      class unloading.
 113     //   b. With unload_classes = false, we have to nominally retain all the references from code
 114     //      cache, because there could be the case of embedded class/oop in the generated code,
 115     //      which we will never visit during mark. Without code cache invalidation, as in (a),
 116     //      we risk executing that code cache blob, and crashing.
 117     //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
 118     //      and instead do that in concurrent phase under the relevant lock. This saves init mark
 119     //      pause time.
 120 
 121     CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
 122     MarkingCodeBlobClosure blobs_cl(oops, ! CodeBlobToOopClosure::FixRelocations);
 123     OopClosure* weak_oops = _process_refs ? NULL : oops;


< prev index next >