1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
  26 
  27 #include "gc/shared/taskqueue.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  30 #include "gc/shenandoah/shenandoahOopClosures.hpp"
  31 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  32 
  33 class ShenandoahStrDedupQueue;
  34 
// Concurrent marking engine for the Shenandoah collector.
// Owns the per-worker mark task queues and the per-worker liveness
// buffers, and provides the templated mark-loop machinery that the
// concurrent, final, and full-GC mark phases share.
// Declarations only: bodies live in the corresponding .cpp/.inline.hpp
// files, so comments on declared-but-not-defined members below describe
// intent inferred from names/signatures and should be confirmed there.
class ShenandoahConcurrentMark: public CHeapObj<mtGC> {
  friend class ShenandoahTraversalGC;
private:
  ShenandoahHeap* _heap;

  // The per-worker-thread work queues
  ShenandoahObjToScanQueueSet* _task_queues;

  // Claim flag for code-cache scanning: the worker that wins
  // claim_codecache() scans code roots; cleared via clear_claim_codecache().
  ShenandoahSharedFlag _claimed_codecache;

  // Used for buffering per-region liveness data.
  // Needed since ShenandoahHeapRegion uses atomics to update liveness.
  //
  // The array has max-workers elements, each of which is an array of
  // jushort * max_regions. The choice of jushort is not accidental:
  // there is a tradeoff between static/dynamic footprint that translates
  // into cache pressure (which is already high during marking), and
  // too many atomic updates. size_t/jint is too large, jbyte is too small.
  jushort** _liveness_local;

private:
  // Process a single mark task from queue q, applying closure cl and
  // accumulating liveness counts into live_data (presumably dispatching
  // to the chunked-array paths below for large object arrays — see
  // the inline definition).
  template <class T>
  inline void do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task);

  // Begin chunked processing of a large object array: split it into
  // (chunk, pow) sub-tasks pushed back on q.
  template <class T>
  inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array);

  // Process one (chunk, pow) slice of a chunked object array task.
  template <class T>
  inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow);

  // Record obj's size against its region in the worker-local live_data
  // buffer; humongous objects take the separate path below.
  inline void count_liveness(jushort* live_data, oop obj);
  inline void count_liveness_humongous(oop obj);

  // Actual mark loop with closures set up
  template <class T, bool CANCELLABLE>
  void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *t);

  // Second currying stage: selects the closure type from the dynamic
  // class_unload/update_refs/strdedup flags before entering mark_loop_work.
  template <bool CANCELLABLE>
  void mark_loop_prework(uint worker_id, ParallelTaskTerminator *terminator, ReferenceProcessor *rp,
                         bool class_unload, bool update_refs, bool strdedup);

public:
  // Mark loop entry.
  // Translates dynamic arguments to template parameters with progressive currying.
  void mark_loop(uint worker_id, ParallelTaskTerminator* terminator, ReferenceProcessor *rp,
                 bool cancellable,
                 bool class_unload, bool update_refs, bool strdedup) {
    if (cancellable) {
      mark_loop_prework<true>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
    } else {
      mark_loop_prework<false>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
    }
  }

  // We need to do this later when the heap is already created.
  void initialize(uint workers);

  // Policy queries; presumably forwarded from heap/heuristics settings —
  // confirm in the .cpp.
  bool process_references() const;
  bool unload_classes() const;

  // Race for the right to scan the code cache (backed by
  // _claimed_codecache); returns true for the single winning caller.
  bool claim_codecache();
  void clear_claim_codecache();

  // Mark the object referenced by p, push it on q, and (per UPDATE_REFS)
  // update the reference to point to the to-space copy.
  template<class T, UpdateRefsMode UPDATE_REFS>
  static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q);

  // As above, with optional string-deduplication: when STRING_DEDUP is set,
  // candidate Strings are also enqueued on dq.
  template<class T, UpdateRefsMode UPDATE_REFS, bool STRING_DEDUP>
  static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq = NULL);

  // Concurrent marking phase: drain the task queues seeded by the root scan.
  void mark_from_roots();

  // Prepares unmarked root objects by marking them and putting
  // them into the marking task queue.
  void init_mark_roots();
  void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
  void update_roots(ShenandoahPhaseTimings::Phase root_phase);

  // Final-mark work shared between concurrent and full GC
  // (full_gc selects the appropriate timing phases).
  void shared_finish_mark_from_roots(bool full_gc);
  void finish_mark_from_roots();

  // The following members are public only because they are called from closures.

  // Pop the next task from q into task; returns false when q is empty.
  inline bool try_queue(ShenandoahObjToScanQueue* q, ShenandoahMarkTask &task);

  ShenandoahObjToScanQueue* get_queue(uint worker_id);
  void clear_queue(ShenandoahObjToScanQueue *q);

  ShenandoahObjToScanQueueSet* task_queues() { return _task_queues;}

  // Worker-local liveness buffer (row of _liveness_local).
  jushort* get_liveness(uint worker_id);

  // Abort marking (e.g. on cancelled GC or degeneration).
  void cancel();

  // Concurrent precleaning of discovered weak references before final mark.
  void preclean_weak_refs();

  void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp, bool update_ref);
private:

  // Weak reference processing during final mark; *_doit is the guts,
  // weak_refs_work wraps it (presumably with timing/setup — confirm in .cpp).
  void weak_refs_work(bool full_gc);
  void weak_refs_work_doit(bool full_gc);

#if TASKQUEUE_STATS
  // Diagnostic task-queue statistics, compiled in only with TASKQUEUE_STATS.
  static void print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats() const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

};
 142 
 143 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP