/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
class CMTask;
class ReferenceProcessor;

// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public OopsInGenClosure {
protected:
  HeapRegion* _from;
public:
  void set_region(HeapRegion* from) { _from = from; }
};

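// Common base class for the G1 evacuation-pause closures below. It caches
// the collected heap, the remembered set, the concurrent marker and the
// per-worker G1ParScanThreadState that the copying closures operate on.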
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem;
  ConcurrentMark* _cm;
  G1ParScanThreadState* _par_scan_state;
  uint _worker_id;
  bool _during_initial_mark;
  bool _mark_in_progress;
public:
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  bool apply_to_weak_ref_discovered_field() { return true; }
};

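// Applied to remembered set entries during an evacuation pause: references
// that point into the collection set are pushed onto the worker's task
// queue for later copying.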
class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
                         G1ParScanThreadState* par_scan_state):
    G1ParClosureSuper(g1, par_scan_state) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

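// Applied to the fields of objects evacuated during the pause: references
// into the collection set are queued on the worker's task queue, while
// other references are used to update remembered sets.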
class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state)
  {
    assert(_ref_processor == NULL, "sanity");
    _ref_processor = rp;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

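// Large object arrays are not scanned in one go; they are split into
// chunks that are processed as separate tasks. A task queue entry that
// refers to the remainder of a partially scanned array is tagged with
// G1_PARTIAL_ARRAY_MASK in its low-order bits so that it can be told
// apart from a plain oop* entry. For example (hypothetical addresses),
// an entry for the array at 0x1000 would be stored as 0x1002, and
// clear_partial_array_mask() recovers 0x1000 again.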
#define G1_PARTIAL_ARRAY_MASK 0x2

template <class T> inline bool has_partial_array_mask(T* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}

template <class T> inline T* set_partial_array_mask(T obj) {
  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
}

template <class T> inline oop clear_partial_array_mask(T* ref) {
  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}

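// Processes the chunks of a large object array whose queue entry carries
// the partial array mask, using the embedded G1ParScanClosure to scan the
// elements of each chunk.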
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;

public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
  {
    assert(_ref_processor == NULL, "sanity");
  }

  G1ParScanClosure* scanner() {
    return &_scanner;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

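// The copying closure applied to roots and, via the specializations below,
// to references handled during evacuation. The template parameters select
// whether the generational barrier is applied (do_gen_barrier), which G1
// barrier is used (barrier), and whether referenced objects are also marked
// for concurrent marking (do_mark_object, needed during initial-mark pauses).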
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;
  template <class T> void do_oop_work(T* p);

protected:
  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that are guaranteed not to move
  // during the GC (i.e., non-CSet objects). It is MT-safe.
  void mark_object(oop obj);

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that have been forwarded during a
  // GC. It is MT-safe.
  void mark_forwarded_object(oop from_obj, oop to_obj);

  oop copy_to_survivor_space(oop obj);

public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   ReferenceProcessor* rp) :
      G1ParClosureSuper(g1, par_scan_state),
      _scanner(g1, par_scan_state, rp) {
    assert(_ref_processor == NULL, "sanity");
  }

  G1ParScanClosure* scanner() { return &_scanner; }

  template <class T> void do_oop_nv(T* p) {
    do_oop_work(p);
  }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;

typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;

// The following closure types are no longer used but are retained
// for historical reasons:
// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;

// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;

// We use a separate closure to handle references during evacuation
// failure processing.
// We could have used another instance of G1ParScanHeapEvacClosure
// (since that closure no longer assumes that the references it
// handles point into the collection set).

typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;

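// Applies the wrapped OopClosure only to references that point into the
// collection set.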
class FilterIntoCSClosure: public OopClosure {
  G1CollectedHeap* _g1;
  OopClosure* _oc;
  DirtyCardToOopClosure* _dcto_cl;
public:
  FilterIntoCSClosure(DirtyCardToOopClosure* dcto_cl,
                      G1CollectedHeap* g1,
                      OopClosure* oc) :
    _g1(g1), _oc(oc), _dcto_cl(dcto_cl) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
};

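// Applies the wrapped OopClosure only to references that point outside the
// given heap region, and counts how many such references it has seen.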
class FilterOutOfRegionClosure: public OopClosure {
  HeapWord* _r_bottom;
  HeapWord* _r_end;
  OopClosure* _oc;
  int _out_of_region;
public:
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  int out_of_region() { return _out_of_region; }
};

// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public OopClosure {
private:
  G1CollectedHeap*   _g1h;
  ConcurrentMark*    _cm;
  CMTask*            _task;
public:
  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(      oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public OopClosure {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  uint _worker_id;
public:
  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
                          uint worker_id) :
    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(      oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Closure that applies the given two closures in sequence.
// Used by the RSet refinement code (when updating RSets
// during an evacuation pause) to record cards containing
// pointers into the collection set.

class G1Mux2Closure : public OopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure* c1, OopClosure* c2);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};

// A closure that returns true if it is actually applied
// to a reference

class G1TriggerClosure : public OopClosure {
  bool _triggered;
public:
  G1TriggerClosure();
  bool triggered() const { return _triggered; }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};

// A closure which uses a triggering closure to determine
// whether to apply an oop closure.

class G1InvokeIfNotTriggeredClosure: public OopClosure {
  G1TriggerClosure* _trigger_cl;
  OopClosure* _oop_cl;
public:
  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};

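// Used while updating remembered sets during an evacuation pause. When
// references into the collection set are being recorded, a reference that
// points into the collection set is handed to the supplied "push" closure;
// any other reference is added directly to the remembered set of the
// region it points into.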
class G1UpdateRSOrPushRefOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem_set;
  HeapRegion* _from;
  OopsInHeapRegionClosure* _push_ref_cl;
  bool _record_refs_into_cset;
  int _worker_i;

public:
  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                G1RemSet* rs,
                                OopsInHeapRegionClosure* push_ref_cl,
                                bool record_refs_into_cset,
                                int worker_i = 0);

  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  bool self_forwarded(oop obj) {
    bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
    return result;
  }

  bool apply_to_weak_ref_discovered_field() { return true; }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP