/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
  26 
  27 #include "gc/shared/taskqueue.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  30 #include "gc/shenandoah/shenandoahOopClosures.hpp"
  31 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  32 
  33 class ShenandoahStrDedupQueue;
  34 
  35 class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
  36   friend class ShenandoahTraversalGC;
  37 private:
  38   ShenandoahHeap* _heap;
  39 
  40   // The per-worker-thread work queues
  41   ShenandoahObjToScanQueueSet* _task_queues;
  42 
  43   ShenandoahSharedFlag _process_references;
  44   ShenandoahSharedFlag _unload_classes;
  45 
  46   ShenandoahSharedFlag _claimed_codecache;
  47 
  48   // Used for buffering per-region liveness data.
  49   // Needed since ShenandoahHeapRegion uses atomics to update liveness.
  50   //
  51   // The array has max-workers elements, each of which is an array of
  52   // jushort * max_regions. The choice of jushort is not accidental:
  53   // there is a tradeoff between static/dynamic footprint that translates
  54   // into cache pressure (which is already high during marking), and
  55   // too many atomic updates. size_t/jint is too large, jbyte is too small.
  56   jushort** _liveness_local;
  57 
  58 private:
  59   template <class T, bool COUNT_LIVENESS>
  60   inline void do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task);
  61 
  62   template <class T>
  63   inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array);
  64 
  65   template <class T>
  66   inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow);
  67 
  68   inline void count_liveness(jushort* live_data, oop obj);
  69 
  70   // Actual mark loop with closures set up
  71   template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
  72   void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *t);
  73 
  74   template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
  75   void mark_loop_prework(uint worker_id, ParallelTaskTerminator *terminator, ReferenceProcessor *rp,
  76                          bool class_unload, bool update_refs, bool strdedup);
  77 
  78   // ------------------------ Currying dynamic arguments to template args ----------------------------
  79 
  80   template <bool CANCELLABLE, bool DRAIN_SATB>
  81   void mark_loop_2(uint w, ParallelTaskTerminator* t, ReferenceProcessor* rp,
  82                    bool count_liveness,
  83                    bool class_unload, bool update_refs, bool strdedup) {
  84     if (count_liveness) {
  85       mark_loop_prework<CANCELLABLE, DRAIN_SATB, true>(w, t, rp, class_unload, update_refs, strdedup);
  86     } else {
  87       mark_loop_prework<CANCELLABLE, DRAIN_SATB, false>(w, t, rp, class_unload, update_refs, strdedup);
  88     }
  89   };
  90 
  91   template <bool CANCELLABLE>
  92   void mark_loop_1(uint w, ParallelTaskTerminator* t, ReferenceProcessor* rp,
  93                    bool drain_satb, bool count_liveness,
  94                    bool class_unload, bool update_refs, bool strdedup) {
  95     if (drain_satb) {
  96       mark_loop_2<CANCELLABLE, true>(w, t, rp, count_liveness, class_unload, update_refs, strdedup);
  97     } else {
  98       mark_loop_2<CANCELLABLE, false>(w, t, rp, count_liveness, class_unload, update_refs, strdedup);
  99     }
 100   };
 101 
 102   // ------------------------ END: Currying dynamic arguments to template args ----------------------------
 103 
 104 public:
 105   // Mark loop entry.
 106   // Translates dynamic arguments to template parameters with progressive currying.
 107   void mark_loop(uint worker_id, ParallelTaskTerminator* terminator, ReferenceProcessor *rp,
 108                  bool cancellable, bool drain_satb, bool count_liveness,
 109                  bool class_unload, bool update_refs, bool strdedup = false) {
 110     if (cancellable) {
 111       mark_loop_1<true>(worker_id, terminator, rp, drain_satb, count_liveness, class_unload, update_refs, strdedup);
 112     } else {
 113       mark_loop_1<false>(worker_id, terminator, rp, drain_satb, count_liveness, class_unload, update_refs, strdedup);
 114     }
 115   }
 116 
 117   // We need to do this later when the heap is already created.
 118   void initialize(uint workers);
 119 
 120   void set_process_references(bool pr);
 121   bool process_references() const;
 122 
 123   void set_unload_classes(bool uc);
 124   bool unload_classes() const;
 125 
 126   bool claim_codecache();
 127   void clear_claim_codecache();
 128 
 129   template<class T, UpdateRefsMode UPDATE_REFS>
 130   static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q);
 131 
 132   template<class T, UpdateRefsMode UPDATE_REFS, bool STRING_DEDUP>
 133   static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq = NULL);
 134 
 135   void mark_from_roots();
 136 
 137   // Prepares unmarked root objects by marking them and putting
 138   // them into the marking task queue.
 139   void init_mark_roots();
 140   void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
 141   void update_roots(ShenandoahPhaseTimings::Phase root_phase);
 142 
 143   void shared_finish_mark_from_roots(bool full_gc);
 144   void finish_mark_from_roots();
 145   // Those are only needed public because they're called from closures.
 146 
 147   inline bool try_queue(ShenandoahObjToScanQueue* q, ShenandoahMarkTask &task);
 148 
 149   ShenandoahObjToScanQueue* get_queue(uint worker_id);
 150   void clear_queue(ShenandoahObjToScanQueue *q);
 151 
 152   inline bool try_draining_satb_buffer(ShenandoahObjToScanQueue *q, ShenandoahMarkTask &task);
 153   void drain_satb_buffers(uint worker_id, bool remark = false);
 154   ShenandoahObjToScanQueueSet* task_queues() { return _task_queues;}
 155 
 156   jushort* get_liveness(uint worker_id);
 157 
 158   void cancel();
 159 
 160   void preclean_weak_refs();
 161 
 162 private:
 163 
 164   void weak_refs_work(bool full_gc);
 165   void weak_refs_work_doit(bool full_gc);
 166 
 167 #if TASKQUEUE_STATS
 168   static void print_taskqueue_stats_hdr(outputStream* const st);
 169   void print_taskqueue_stats() const;
 170   void reset_taskqueue_stats();
 171 #endif // TASKQUEUE_STATS
 172 
 173 };
 174 
 175 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP