1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
  27 
  28 class HeapRegion;
  29 class G1CollectedHeap;
  30 class G1RemSet;
  31 class ConcurrentMark;
  32 class DirtyCardToOopClosure;
  33 class CMBitMap;
  34 class CMMarkStack;
  35 class G1ParScanThreadState;
  36 class CMTask;
  37 class ReferenceProcessor;
  38 
  39 // A class that scans oops in a given heap region (much as OopsInGenClosure
  40 // scans oops in a generation.)
// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public ExtendedOopClosure {
protected:
  HeapRegion* _from;  // Region currently being scanned; installed via set_region().
public:
  // Record the region whose oops will be visited next.
  void set_region(HeapRegion* from) { _from = from; }
};
  47 
// Common base for the G1 parallel-scan closures below: caches pointers to
// the heap, its remembered set, the concurrent marker and the per-worker
// scan state so derived closures need not look them up per oop.
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;                  // The G1 heap being collected.
  G1RemSet* _g1_rem;                     // Its remembered set.
  ConcurrentMark* _cm;                   // Concurrent marking state.
  G1ParScanThreadState* _par_scan_state; // Per-GC-worker scanning state.
  uint _worker_id;                       // Id of the worker thread using this closure.
  // NOTE(review): the two flags below are presumably set from the current
  // collection phase by the out-of-line constructor -- confirm in the .cpp.
  bool _during_initial_mark;
  bool _mark_in_progress;
public:
  // Defined out of line (initializes all cached fields above).
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  // These closures are also applied to the discovered field of weak refs.
  bool apply_to_weak_ref_discovered_field() { return true; }
};
  61 
// Closure applied when scanning remembered sets during an evacuation pause;
// the actual per-oop work is in do_oop_nv (defined out of line).
class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
                         G1ParScanThreadState* par_scan_state):
    G1ParClosureSuper(g1, par_scan_state) { }

  // Non-virtual template worker; the virtual entry points delegate to it so
  // the specialized (devirtualized) oop iteration can call it directly.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};
  72 
// Closure used to scan the fields of objects during evacuation.
class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state)
  {
    // _ref_processor is inherited (from ExtendedOopClosure -- not visible
    // here); the base must not have set one already.
    assert(_ref_processor == NULL, "sanity");
    _ref_processor = rp;
  }

  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};
  86 
  87 #define G1_PARTIAL_ARRAY_MASK 0x2
  88 
  89 inline bool has_partial_array_mask(oop* ref) {
  90   return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  91 }
  92 
  93 // We never encode partial array oops as narrowOop*, so return false immediately.
  94 // This allows the compiler to create optimized code when popping references from
  95 // the work queue.
// Partial array oops are never encoded as narrowOop*, so this overload can
// unconditionally return false; the assert catches any violation in debug
// builds while letting release builds fold the call away entirely.
inline bool has_partial_array_mask(narrowOop* ref) {
  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
  return false;
}
 100 
 101 // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
 102 // We always encode partial arrays as regular oop, to allow the
 103 // specialization for has_partial_array_mask() for narrowOops above.
 104 // This means that unintentional use of this method with narrowOops are caught
 105 // by the compiler.
// Tag an oop as a partial-array task by setting the mask bit in its address.
// Only provided for oop (not narrowOop) -- see the comment block above.
inline oop* set_partial_array_mask(oop obj) {
  // The low bits of an object address must be clear, or tagging would
  // destroy address information.
  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}
 110 
 111 template <class T> inline oop clear_partial_array_mask(T* ref) {
 112   return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
 113 }
 114 
// Closure that processes partial-array tasks (chunks of large object
// arrays), delegating the scanning of individual elements to the embedded
// G1ParScanClosure.
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;  // Scans the array elements of each chunk.

public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
  {
    // This closure itself performs no reference discovery; rp goes to the
    // embedded scanner only.
    assert(_ref_processor == NULL, "sanity");
  }

  // Accessor for the embedded element scanner.
  G1ParScanClosure* scanner() {
    return &_scanner;
  }

  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
 133 
 134 // Add back base class for metadata
 135 class G1ParCopyHelper : public G1ParClosureSuper {
 136   Klass* _scanned_klass;
 137 
 138  public:
 139   G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
 140       _scanned_klass(NULL),
 141       G1ParClosureSuper(g1, par_scan_state) {}
 142 
 143   void set_scanned_klass(Klass* k) { _scanned_klass = k; }
 144   template <class T> void do_klass_barrier(T* p, oop new_obj);
 145 };
 146 
 147 template <G1Barrier barrier, bool do_mark_object>
 148 class G1ParCopyClosure : public G1ParCopyHelper {
 149   G1ParScanClosure _scanner;
 150   template <class T> void do_oop_work(T* p);
 151 
 152 protected:
 153   // Mark the object if it's not already marked. This is used to mark
 154   // objects pointed to by roots that are guaranteed not to move
 155   // during the GC (i.e., non-CSet objects). It is MT-safe.
 156   void mark_object(oop obj);
 157 
 158   // Mark the object if it's not already marked. This is used to mark
 159   // objects pointed to by roots that have been forwarded during a
 160   // GC. It is MT-safe.
 161   void mark_forwarded_object(oop from_obj, oop to_obj);
 162 
 163   oop copy_to_survivor_space(oop obj);
 164 
 165 public:
 166   G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
 167                    ReferenceProcessor* rp) :
 168       _scanner(g1, par_scan_state, rp),
 169       G1ParCopyHelper(g1, par_scan_state) {
 170     assert(_ref_processor == NULL, "sanity");
 171   }
 172 
 173   G1ParScanClosure* scanner() { return &_scanner; }
 174 
 175   template <class T> void do_oop_nv(T* p) {
 176     do_oop_work(p);
 177   }
 178   virtual void do_oop(oop* p)       { do_oop_nv(p); }
 179   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 180 };
 181 
 182 typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
 183 typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
 184 
 185 
 186 typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
 187 typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
 188 
 189 // The following closure type is defined in g1_specialized_oop_closures.hpp:
 190 //
 191 // typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 192 
 193 // We use a separate closure to handle references during evacuation
 194 // failure processing.
 195 // We could have used another instance of G1ParScanHeapEvacClosure
 196 // (since that closure no longer assumes that the references it
 197 // handles point into the collection set).
 198 
 199 typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 200 
 201 class FilterIntoCSClosure: public ExtendedOopClosure {
 202   G1CollectedHeap* _g1;
 203   OopClosure* _oc;
 204   DirtyCardToOopClosure* _dcto_cl;
 205 public:
 206   FilterIntoCSClosure(  DirtyCardToOopClosure* dcto_cl,
 207                         G1CollectedHeap* g1,
 208                         OopClosure* oc) :
 209     _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
 210 
 211   template <class T> void do_oop_nv(T* p);
 212   virtual void do_oop(oop* p)        { do_oop_nv(p); }
 213   virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
 214   bool apply_to_weak_ref_discovered_field() { return true; }
 215 };
 216 
// Filtering closure parameterized by a region's [bottom, end) bounds;
// the out-of-line do_oop_nv uses them to filter references (presumably
// forwarding only those pointing outside the region to _oc -- confirm in
// the .inline.hpp).
class FilterOutOfRegionClosure: public ExtendedOopClosure {
  HeapWord* _r_bottom;  // Bottom of the region, captured at construction.
  HeapWord* _r_end;     // End of the region.
  OopClosure* _oc;      // Closure applied to references that pass the filter.
public:
  // Defined out of line.
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
};
 228 
 229 // Closure for iterating over object fields during concurrent marking
// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap*   _g1h;   // The heap being marked.
  ConcurrentMark*    _cm;    // Global concurrent-marking state.
  CMTask*            _task;  // The marking task this closure feeds.
public:
  // Defined out of line.
  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(      oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
 241 
 242 // Closure to scan the root regions during concurrent marking
// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap* _g1h;       // The heap being marked.
  ConcurrentMark*  _cm;        // Global concurrent-marking state.
  uint _worker_id;             // Id of the worker running the scan.
public:
  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
                          uint worker_id) :
    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(      oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
 256 
 257 // Closure that applies the given two closures in sequence.
 258 // Used by the RSet refinement code (when updating RSets
 259 // during an evacuation pause) to record cards containing
 260 // pointers into the collection set.
 261 
// Applies the two wrapped closures in sequence to each reference
// (see the usage comment above this class).
class G1Mux2Closure : public ExtendedOopClosure {
  OopClosure* _c1;  // Applied first.
  OopClosure* _c2;  // Applied second.
public:
  // Defined out of line.
  G1Mux2Closure(OopClosure *c1, OopClosure *c2);
  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
 271 
 272 // A closure that returns true if it is actually applied
 273 // to a reference
 274 
// A closure that records whether it was ever applied to a reference;
// query the result with triggered().
class G1TriggerClosure : public ExtendedOopClosure {
  bool _triggered;  // Set once the closure has been applied (see .cpp/.inline.hpp).
public:
  // Defined out of line.
  G1TriggerClosure();
  bool triggered() const { return _triggered; }
  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
 284 
 285 // A closure which uses a triggering closure to determine
 286 // whether to apply an oop closure.
 287 
// Applies _oop_cl only while the trigger closure has not fired
// (see the comment above this class; logic is in the out-of-line do_oop_nv).
class G1InvokeIfNotTriggeredClosure: public ExtendedOopClosure {
  G1TriggerClosure* _trigger_cl;  // Consulted to decide whether to invoke.
  OopClosure* _oop_cl;            // The closure conditionally invoked.
public:
  // Defined out of line.
  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
 297 
// Closure used during RSet updating: depending on _record_refs_into_cset,
// references are either recorded via the push-ref closure or handled through
// the remembered set (exact policy is in the out-of-line do_oop_nv).
class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;                    // The heap.
  G1RemSet* _g1_rem_set;                   // Remembered set being updated.
  HeapRegion* _from;                       // Region the scanned refs come from.
  OopsInHeapRegionClosure* _push_ref_cl;   // Receives refs when recording into the CSet.
  bool _record_refs_into_cset;             // Mode flag chosen at construction.
  int _worker_i;                           // Worker id (defaults to 0).

public:
  // Defined out of line.
  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                G1RemSet* rs,
                                OopsInHeapRegionClosure* push_ref_cl,
                                bool record_refs_into_cset,
                                int worker_i = 0);

  // Must be called before use; a NULL source region is a caller error.
  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  // True iff obj failed evacuation and was forwarded to itself.
  bool self_forwarded(oop obj) {
    bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
    return result;
  }

  // Also applied to the discovered field of weak references.
  bool apply_to_weak_ref_discovered_field() { return true; }

  // Non-virtual template worker; virtual entry points delegate to it.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
};
 329 
 330 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP