/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP

#include "utilities/taskqueue.hpp"
#include "utilities/workgroup.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"

class ShenandoahStrDedupQueue;

class ShenandoahConcurrentMark: public CHeapObj<mtGC> {

private:
  ShenandoahHeap* _heap;

  // The per-worker-thread work queues
  ShenandoahObjToScanQueueSet* _task_queues;

  ShenandoahSharedFlag _claimed_codecache;

  // Used for buffering per-region liveness data.
  // Needed since ShenandoahHeapRegion uses atomics to update liveness.
  //
  // The array has max-workers elements, each of which is an array of
  // max_regions jushorts. The choice of jushort is not accidental:
  // there is a tradeoff between static/dynamic footprint that translates
  // into cache pressure (which is already high during marking), and
  // too many atomic updates. size_t/jint is too large, jbyte is too small.
  jushort** _liveness_local;
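
  // Roughly (illustrative, not the actual code): each worker accumulates
  // object sizes into its own buffer without atomics, e.g.
  //
  //   jushort* live = get_liveness(worker_id);     // this worker's buffer
  //   live[region_index] += (jushort) obj->size(); // plain add, no CAS
  //
  // and the buffered counts are published to the regions later, with far
  // fewer atomic updates than per-object accounting would need.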

private:
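  // Process a single mark task: scan the object with the closure and
  // account its size in the worker-local live data. Large object arrays
  // are split into chunks (do_chunked_array_start/do_chunked_array) so
  // that scanning them can be balanced across workers.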
  template <class T>
  inline void do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task);

  template <class T>
  inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array);

  template <class T>
  inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow);

  inline void count_liveness(jushort* live_data, oop obj);
  inline void count_liveness_humongous(oop obj);

  // Actual mark loop with closures set up
  template <class T, bool CANCELLABLE>
  void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *t);

  template <bool CANCELLABLE>
  void mark_loop_prework(uint worker_id, ParallelTaskTerminator *terminator, ReferenceProcessor *rp,
                         bool class_unload, bool update_refs, bool strdedup);

public:
  // Mark loop entry.
  // Translates dynamic arguments to template parameters with progressive currying.
  void mark_loop(uint worker_id, ParallelTaskTerminator* terminator, ReferenceProcessor *rp,
                 bool cancellable,
                 bool class_unload, bool update_refs, bool strdedup) {
    if (cancellable) {
      mark_loop_prework<true>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
    } else {
      mark_loop_prework<false>(worker_id, terminator, rp, class_unload, update_refs, strdedup);
    }
  }
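
  // Informally, the dispatch chain is:
  //   mark_loop(cancellable, ...) -> mark_loop_prework<CANCELLABLE>(...)
  //     -> mark_loop_work<Closure, CANCELLABLE>(...)
  // so the hot marking loop is specialized at compile time rather than
  // branching on these flags for every object.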

  // Needs to be done later, when the heap is already created.
  void initialize(uint workers);

  bool process_references() const;
  bool unload_classes() const;

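  // Single-claimant protocol over _claimed_codecache: the worker that wins
  // the claim scans the code cache roots (see concurrent_scan_code_roots).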
  bool claim_codecache();
  void clear_claim_codecache();

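  // Evaluate the reference at p: depending on UPDATE_REFS, possibly update
  // it to point to the to-space copy, then mark the referenced object and
  // push it onto q if it has not been marked yet. The STRING_DEDUP variant
  // may additionally enqueue candidate Strings onto dq.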
  template<class T, UpdateRefsMode UPDATE_REFS>
  static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context);

  template<class T, UpdateRefsMode UPDATE_REFS, bool STRING_DEDUP>
  static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, ShenandoahStrDedupQueue* dq = NULL);

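  // Concurrent marking proper: worker threads drain the task queues seeded
  // by the root scan until the terminator declares them empty.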
  void mark_from_roots();

  // Prepares unmarked root objects by marking them and putting
  // them into the marking task queues.
  void init_mark_roots();
  void mark_roots(ShenandoahPhaseTimings::Phase root_phase);
  void update_roots(ShenandoahPhaseTimings::Phase root_phase);

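  // Final mark work shared between the concurrent and full GC paths:
  // drain the remaining work (including SATB buffers) and process weak
  // references.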
  void shared_finish_mark_from_roots(bool full_gc);
  void finish_mark_from_roots();
  // These are public only because they are called from closures.

  inline bool try_queue(ShenandoahObjToScanQueue* q, ShenandoahMarkTask &task);

  ShenandoahObjToScanQueue* get_queue(uint worker_id);
  void clear_queue(ShenandoahObjToScanQueue *q);

  ShenandoahObjToScanQueueSet* task_queues() { return _task_queues; }

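  // Returns the given worker's local liveness buffer.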
  jushort* get_liveness(uint worker_id);

  void cancel();

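  // Concurrently preclean discovered references before the final mark pause.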
  void preclean_weak_refs();

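  // Scan code cache roots during concurrent marking; only the worker that
  // claims the code cache (claim_codecache) does the scan.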
  void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp, bool update_ref);
private:

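  // Weak reference processing; weak_refs_work_doit does the actual work.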
  void weak_refs_work(bool full_gc);
  void weak_refs_work_doit(bool full_gc);
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP