/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
class CMTask;
class ReferenceProcessor;

// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public ExtendedOopClosure {
protected:
  HeapRegion* _from;
public:
  void set_region(HeapRegion* from) { _from = from; }
};
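
// Typical usage (a sketch of the expected pattern, not a requirement imposed
// by this header): callers invoke set_region(r) before applying the closure
// to objects allocated in region r, so that do_oop() knows which region each
// scanned reference originates from.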

class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;
  G1ParScanThreadState* _par_scan_state;
  uint _worker_id;
public:
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  bool apply_to_weak_ref_discovered_field() { return true; }
};

class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
                         G1ParScanThreadState* par_scan_state):
    G1ParClosureSuper(g1, par_scan_state) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state)
  {
    assert(_ref_processor == NULL, "sanity");
    _ref_processor = rp;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

#define G1_PARTIAL_ARRAY_MASK 0x2

inline bool has_partial_array_mask(oop* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}

// We never encode partial array oops as narrowOop*, so return false immediately.
// This allows the compiler to create optimized code when popping references from
// the work queue.
inline bool has_partial_array_mask(narrowOop* ref) {
  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
  return false;
}

// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
// We always encode partial arrays as regular oops, to allow the narrowOop*
// specialization of has_partial_array_mask() above.
// This means that unintentional use of this method with narrowOops is caught
// by the compiler.
inline oop* set_partial_array_mask(oop obj) {
  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}

template <class T> inline oop clear_partial_array_mask(T* ref) {
  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
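
// Illustrative round-trip for the partial-array encoding above (a usage
// sketch, not an additional API; task and original are just local names):
//
//   oop* task = set_partial_array_mask(obj);          // tag before queueing
//   if (has_partial_array_mask(task)) {
//     oop original = clear_partial_array_mask(task);  // recover the plain oop
//   }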

class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;

public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
  {
    assert(_ref_processor == NULL, "sanity");
  }

  G1ParScanClosure* scanner() {
    return &_scanner;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Common base class for the copy closures below: tracks the klass being
// scanned (for the metadata barrier) and supports marking objects through
// ConcurrentMark.
class G1ParCopyHelper : public G1ParClosureSuper {
protected:
  Klass* _scanned_klass;
  ConcurrentMark* _cm;

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that are guaranteed not to move
  // during the GC (i.e., non-CSet objects). It is MT-safe.
  void mark_object(oop obj);

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that have been forwarded during a
  // GC. It is MT-safe.
  void mark_forwarded_object(oop from_obj, oop to_obj);
public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);

  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
  template <class T> void do_klass_barrier(T* p, oop new_obj);
};

template <G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
  G1ParScanClosure _scanner;
  template <class T> void do_oop_work(T* p);

protected:
  oop copy_to_survivor_space(oop obj);

public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   ReferenceProcessor* rp) :
      G1ParCopyHelper(g1, par_scan_state),
      _scanner(g1, par_scan_state, rp) {
    assert(_ref_processor == NULL, "sanity");
  }

  G1ParScanClosure* scanner() { return &_scanner; }

  template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;

typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;

// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;

// We use a separate closure to handle references during evacuation
// failure processing. We could instead have used another instance of
// G1ParScanHeapEvacClosure, since that closure no longer assumes that
// the references it handles point into the collection set.

typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;

class FilterIntoCSClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;
  OopClosure* _oc;
  DirtyCardToOopClosure* _dcto_cl;
public:
  FilterIntoCSClosure(DirtyCardToOopClosure* dcto_cl,
                      G1CollectedHeap* g1,
                      OopClosure* oc) :
    _g1(g1), _oc(oc), _dcto_cl(dcto_cl) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
};

class FilterOutOfRegionClosure: public ExtendedOopClosure {
  HeapWord* _r_bottom;
  HeapWord* _r_end;
  OopClosure* _oc;
public:
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
};

// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap*   _g1h;
  ConcurrentMark*    _cm;
  CMTask*            _task;
public:
  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(      oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  uint _worker_id;
public:
  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
                          uint worker_id) :
    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(      oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Closure that applies the given two closures in sequence.
// Used by the RSet refinement code (when updating RSets
// during an evacuation pause) to record cards containing
// pointers into the collection set.

class G1Mux2Closure : public ExtendedOopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure *c1, OopClosure *c2);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
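
// Conceptually, for each reference p this closure is expected to behave as
// the following sketch (the actual do_oop_nv() definitions live out of line):
//
//   _c1->do_oop(p);
//   _c2->do_oop(p);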

// A closure that records, and reports via triggered(), whether it has
// actually been applied to a reference.

class G1TriggerClosure : public ExtendedOopClosure {
  bool _triggered;
public:
  G1TriggerClosure();
  bool triggered() const { return _triggered; }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};

// A closure which uses a triggering closure to determine
// whether to apply an oop closure.

class G1InvokeIfNotTriggeredClosure: public ExtendedOopClosure {
  G1TriggerClosure* _trigger_cl;
  OopClosure* _oop_cl;
public:
  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
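
// Together, G1TriggerClosure and G1InvokeIfNotTriggeredClosure give an
// "apply until triggered" pattern. A sketch of the expected behavior of
// G1InvokeIfNotTriggeredClosure for a reference p (not the actual
// out-of-line definition):
//
//   if (!_trigger_cl->triggered()) {
//     _oop_cl->do_oop(p);
//   }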

class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem_set;
  HeapRegion* _from;
  OopsInHeapRegionClosure* _push_ref_cl;
  bool _record_refs_into_cset;
  int _worker_i;

public:
  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                G1RemSet* rs,
                                OopsInHeapRegionClosure* push_ref_cl,
                                bool record_refs_into_cset,
                                int worker_i = 0);

  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  bool self_forwarded(oop obj) {
    return obj->is_forwarded() && obj->forwardee() == obj;
  }

  bool apply_to_weak_ref_discovered_field() { return true; }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP