
src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp

rev 50928 : Process remaining SATB buffers in final mark/traverse loop instead of separate phase
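
For orientation: the first listing below is the header before this change, the second is after. The corresponding .cpp change is not shown here, but its shape can be sketched under stated assumptions: instead of a separate SATB drain phase selected by the DRAIN_SATB template flag, the final mark/traverse loop itself consumes completed SATB buffers. ShenandoahSATBBufferClosure and the queue-set accessor below are assumed from Shenandoah code of this era, not taken from this webrev.

    // Sketch only, assuming era-typical names (ShenandoahSATBBufferClosure,
    // JavaThread::satb_mark_queue_set); not taken from this webrev.
    // Inside mark_loop_work: drain completed SATB buffers into the
    // worker's scan queue, then let the ordinary task loop mark them.
    ShenandoahObjToScanQueue* q = get_queue(worker_id);
    ShenandoahSATBBufferClosure drain_satb(q);
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    while (satb_mq_set.apply_closure_to_completed_buffer(&drain_satb)) {
      // each iteration consumed one filled buffer; loop until none remain
    }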


  49   // jushort * max_regions. The choice of jushort is not accidental:
  50   // there is a tradeoff between static/dynamic footprint (which translates
  51   // into cache pressure, already high during marking) and the number of
  52   // atomic updates. size_t/jint is too large, jbyte is too small.
  53   jushort** _liveness_local;
  54 
  55 private:
  56   template <class T>
  57   inline void do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task);
  58 
  59   template <class T>
  60   inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array);
  61 
  62   template <class T>
  63   inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow);
  64 
  65   inline void count_liveness(jushort* live_data, oop obj);
  66   inline void count_liveness_humongous(oop obj);
  67 
  68   // Actual mark loop with closures set up
  69   template <class T, bool CANCELLABLE, bool DRAIN_SATB>
  70   void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *t);
  71 
  72   template <bool CANCELLABLE, bool DRAIN_SATB>
  73   void mark_loop_prework(uint worker_id, ParallelTaskTerminator *terminator, ReferenceProcessor *rp,
  74                          bool class_unload, bool update_refs, bool strdedup);
  75 
  76 public:
  77   // Mark loop entry.
  78   // Translates dynamic arguments to template parameters with progressive currying.
  79   void mark_loop(uint worker_id, ParallelTaskTerminator* terminator, ReferenceProcessor *rp,
  80                  bool cancellable, bool drain_satb,
  81                  bool class_unload, bool update_refs, bool strdedup) {
  82     if (cancellable) {
  83       if (drain_satb) {
  84         mark_loop_prework<true, true>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
  85       } else {
  86         mark_loop_prework<true, false>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
  87       }
  88     } else {
  89       if (drain_satb) {
  90         mark_loop_prework<false, true>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
  91       } else {
  92         mark_loop_prework<false, false>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
  93       }
  94     }
  95   }
  96 
  97   // This needs to be done later, once the heap has been created.
  98   void initialize(uint workers);
  99 
 100   bool process_references() const;
 101   bool unload_classes() const;
 102 
 103   bool claim_codecache();
 104   void clear_claim_codecache();
 105 
 106   template<class T, UpdateRefsMode UPDATE_REFS>
 107   static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q);
 108 
 109   template<class T, UpdateRefsMode UPDATE_REFS, bool STRING_DEDUP>
 110   static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq = NULL);
 111 
 112   void mark_from_roots();
 113 
 114   // Prepares unmarked root objects by marking them and putting
 115   // them into the marking task queue.
 116   void init_mark_roots();
 117   void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
 118   void update_roots(ShenandoahPhaseTimings::Phase root_phase);
 119 
 120   void shared_finish_mark_from_roots(bool full_gc);
 121   void finish_mark_from_roots();
 122   // These are public only because they are called from closures.
 123 
 124   inline bool try_queue(ShenandoahObjToScanQueue* q, ShenandoahMarkTask &task);
 125 
 126   ShenandoahObjToScanQueue* get_queue(uint worker_id);
 127   void clear_queue(ShenandoahObjToScanQueue *q);
 128 
 129   void drain_satb_buffers(uint worker_id, bool remark = false);
 130   ShenandoahObjToScanQueueSet* task_queues() { return _task_queues; }
 131 
 132   jushort* get_liveness(uint worker_id);
 133 
 134   void cancel();
 135 
 136   void preclean_weak_refs();
 137 
 138   void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp, bool update_ref);
 139 private:
 140 
 141   void weak_refs_work(bool full_gc);
 142   void weak_refs_work_doit(bool full_gc);
 143 
 144 #if TASKQUEUE_STATS
 145   static void print_taskqueue_stats_hdr(outputStream* const st);
 146   void print_taskqueue_stats() const;
 147   void reset_taskqueue_stats();
 148 #endif // TASKQUEUE_STATS
 149 
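
The jushort comment at lines 49-52 of both versions motivates a two-level liveness counter: a dense worker-local cache plus a shared atomic counter per region. A standalone model of that scheme (plain C++ with illustrative names; not the HotSpot code):

    #include <atomic>
    #include <cstdint>

    struct Region { std::atomic<size_t> live_words{0}; };

    // Worker-local liveness cache: 2 bytes per region keeps the array
    // small (cache-friendly while marking); the shared counter takes
    // one atomic add only when the local count would saturate.
    void count_live(uint16_t* live_data, Region* regions,
                    size_t region_idx, size_t obj_words) {
      size_t cur = (size_t) live_data[region_idx] + obj_words;
      if (cur >= UINT16_MAX) {
        regions[region_idx].live_words.fetch_add(cur, std::memory_order_relaxed);
        cur = 0; // restart the local count after publishing
      }
      live_data[region_idx] = (uint16_t) cur;
    }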


  49   // jushort * max_regions. The choice of jushort is not accidental:
  50   // there is a tradeoff between static/dynamic footprint (which translates
  51   // into cache pressure, already high during marking) and the number of
  52   // atomic updates. size_t/jint is too large, jbyte is too small.
  53   jushort** _liveness_local;
  54 
  55 private:
  56   template <class T>
  57   inline void do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task);
  58 
  59   template <class T>
  60   inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array);
  61 
  62   template <class T>
  63   inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow);
  64 
  65   inline void count_liveness(jushort* live_data, oop obj);
  66   inline void count_liveness_humongous(oop obj);
  67 
  68   // Actual mark loop with closures set up
  69   template <class T, bool CANCELLABLE>
  70   void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *t);
  71 
  72   template <bool CANCELLABLE>
  73   void mark_loop_prework(uint worker_id, ParallelTaskTerminator *terminator, ReferenceProcessor *rp,
  74                          bool class_unload, bool update_refs, bool strdedup);
  75 
  76 public:
  77   // Mark loop entry.
  78   // Translates dynamic arguments to template parameters with progressive currying.
  79   void mark_loop(uint worker_id, ParallelTaskTerminator* terminator, ReferenceProcessor *rp,
  80                  bool cancellable,
  81                  bool class_unload, bool update_refs, bool strdedup) {
  82     if (cancellable) {
  83       mark_loop_prework<true>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
  84     } else {
  85       mark_loop_prework<false>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
  86     }
  87   }
  88 
  89   // This needs to be done later, once the heap has been created.
  90   void initialize(uint workers);
  91 
  92   bool process_references() const;
  93   bool unload_classes() const;
  94 
  95   bool claim_codecache();
  96   void clear_claim_codecache();
  97 
  98   template<class T, UpdateRefsMode UPDATE_REFS>
  99   static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q);
 100 
 101   template<class T, UpdateRefsMode UPDATE_REFS, bool STRING_DEDUP>
 102   static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq = NULL);
 103 
 104   void mark_from_roots();
 105 
 106   // Prepares unmarked root objects by marking them and putting
 107   // them into the marking task queue.
 108   void init_mark_roots();
 109   void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
 110   void update_roots(ShenandoahPhaseTimings::Phase root_phase);
 111 
 112   void shared_finish_mark_from_roots(bool full_gc);
 113   void finish_mark_from_roots();
 114   // These are public only because they are called from closures.
 115 
 116   inline bool try_queue(ShenandoahObjToScanQueue* q, ShenandoahMarkTask &task);
 117 
 118   ShenandoahObjToScanQueue* get_queue(uint worker_id);
 119   void clear_queue(ShenandoahObjToScanQueue *q);
 120 
 121   void drain_satb_buffers(uint worker_id);
 122   ShenandoahObjToScanQueueSet* task_queues() { return _task_queues; }
 123 
 124   jushort* get_liveness(uint worker_id);
 125 
 126   void cancel();
 127 
 128   void preclean_weak_refs();
 129 
 130   void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp, bool update_ref);
 131 private:
 132 
 133   void weak_refs_work(bool full_gc);
 134   void weak_refs_work_doit(bool full_gc);
 135 
 136 #if TASKQUEUE_STATS
 137   static void print_taskqueue_stats_hdr(outputStream* const st);
 138   void print_taskqueue_stats() const;
 139   void reset_taskqueue_stats();
 140 #endif // TASKQUEUE_STATS
 141 
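
One effect of dropping DRAIN_SATB from the dispatch: mark_loop_prework now instantiates two template bodies (cancellable true/false) instead of four, while the marking loop still runs branch-free on these flags. A hypothetical call site, with flag accessors assumed for illustration:

    // Hypothetical caller; flag accessors are assumptions, not from this webrev.
    cm->mark_loop(worker_id, terminator, rp,
                  true,                     // cancellable: concurrent phase
                  cm->unload_classes(),     // class_unload (declared above)
                  update_refs,              // from the collector's current mode
                  ShenandoahStringDedup::is_enabled()); // strdedup (assumed API)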