rev 2585 : [mq]: g1-reference-processing

--- old/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
+++ new/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
... 26 lines elided ...
  27   27  
  28   28  class HeapRegion;
  29   29  class G1CollectedHeap;
  30   30  class G1RemSet;
  31   31  class ConcurrentMark;
  32   32  class DirtyCardToOopClosure;
  33   33  class CMBitMap;
  34   34  class CMMarkStack;
  35   35  class G1ParScanThreadState;
  36   36  class CMTask;
       37 +class ReferenceProcessor;
  37   38  
  38   39  // A class that scans oops in a given heap region (much as OopsInGenClosure
  39   40  // scans oops in a generation.)
  40   41  class OopsInHeapRegionClosure: public OopsInGenClosure {
  41   42  protected:
  42   43    HeapRegion* _from;
  43   44  public:
  44   45    void set_region(HeapRegion* from) { _from = from; }
  45   46  };
  46   47  
... 3 lines elided ...
  50   51    G1RemSet* _g1_rem;
  51   52    ConcurrentMark* _cm;
  52   53    G1ParScanThreadState* _par_scan_state;
  53   54  public:
  54   55    G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  55   56    bool apply_to_weak_ref_discovered_field() { return true; }
  56   57  };
  57   58  
  58   59  class G1ParPushHeapRSClosure : public G1ParClosureSuper {
  59   60  public:
  60      -  G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  61      -    G1ParClosureSuper(g1, par_scan_state) { }
       61 +  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
       62 +                         G1ParScanThreadState* par_scan_state,
       63 +                         ReferenceProcessor* rp) :
       64 +    G1ParClosureSuper(g1, par_scan_state)
       65 +  {
       66 +    assert(_ref_processor == NULL, "sanity");
       67 +    _ref_processor = rp;
       68 +  }
       69 +
  62   70    template <class T> void do_oop_nv(T* p);
  63   71    virtual void do_oop(oop* p)          { do_oop_nv(p); }
  64   72    virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
  65   73  };
  66   74  
  67   75  class G1ParScanClosure : public G1ParClosureSuper {
  68   76  public:
  69      -  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  70      -    G1ParClosureSuper(g1, par_scan_state) { }
       77 +  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
       78 +    G1ParClosureSuper(g1, par_scan_state)
       79 +  {
       80 +    assert(_ref_processor == NULL, "sanity");
       81 +    _ref_processor = rp;
       82 +  }
       83 +
  71   84    template <class T> void do_oop_nv(T* p);
  72   85    virtual void do_oop(oop* p)          { do_oop_nv(p); }
  73   86    virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
  74   87  };
  75   88  
  76   89  #define G1_PARTIAL_ARRAY_MASK 0x2
  77   90  
  78   91  template <class T> inline bool has_partial_array_mask(T* ref) {
  79   92    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  80   93  }
... 2 lines elided ...
  83   96    assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  84   97    return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
  85   98  }
  86   99  
  87  100  template <class T> inline oop clear_partial_array_mask(T* ref) {
  88  101    return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  89  102  }
  90  103  
  91  104  class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  92  105    G1ParScanClosure _scanner;
      106 +
  93  107  public:
  94      -  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
  95      -    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
      108 +  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
      109 +    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
      110 +  {
      111 +    assert(_ref_processor == NULL, "sanity");
      112 +  }
      113 +
      114 +  G1ParScanClosure* scanner() {
      115 +    return &_scanner;
      116 +  }
      117 +
  96  118    template <class T> void do_oop_nv(T* p);
  97  119    virtual void do_oop(oop* p)       { do_oop_nv(p); }
  98  120    virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  99  121  };
 100  122  
 101  123  
 102  124  class G1ParCopyHelper : public G1ParClosureSuper {
 103  125    G1ParScanClosure *_scanner;
 104  126  protected:
 105  127    template <class T> void mark_forwardee(T* p);
 106  128    oop copy_to_survivor_space(oop obj);
      129 +
 107  130  public:
 108  131    G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
 109  132                    G1ParScanClosure *scanner) :
 110  133      G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
 111  134  };
 112  135  
 113      -template<bool do_gen_barrier, G1Barrier barrier,
 114      -         bool do_mark_forwardee>
      136 +template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
 115  137  class G1ParCopyClosure : public G1ParCopyHelper {
 116  138    G1ParScanClosure _scanner;
      139 +
 117  140    template <class T> void do_oop_work(T* p);
      141 +
 118  142  public:
 119      -  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
 120      -    _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
      143 +  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
      144 +                   ReferenceProcessor* rp) :
      145 +    _scanner(g1, par_scan_state, rp),
      146 +    G1ParCopyHelper(g1, par_scan_state, &_scanner)
      147 +  {
      148 +    assert(_ref_processor == NULL, "sanity");
      149 +  }
      150 +
      151 +  G1ParScanClosure* scanner() { return &_scanner; }
      152 +
 121  153    template <class T> void do_oop_nv(T* p) {
 122  154      do_oop_work(p);
 123  155      if (do_mark_forwardee)
 124  156        mark_forwardee(p);
 125  157    }
 126  158    virtual void do_oop(oop* p)       { do_oop_nv(p); }
 127  159    virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 128  160  };
 129  161  
 130  162  typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
 131  163  typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
 132      -typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
      164 +
 133  165  typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
 134  166  typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkPermClosure;
 135      -typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
 136  167  
 137      -// This is the only case when we set skip_cset_test. Basically, this
 138      -// closure is (should?) only be called directly while we're draining
 139      -// the overflow and task queues. In that case we know that the
 140      -// reference in question points into the collection set, otherwise we
 141      -// would not have pushed it on the queue. The following is defined in
 142      -// g1_specialized_oop_closures.hpp.
 143      -// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
 144      -// We need a separate closure to handle references during evacuation
 145      -// failure processing, as we cannot asume that the reference already
 146      -// points into the collection set (like G1ParScanHeapEvacClosure does).
      168 +// The following closure types are no longer used but are retained
      169 +// for historical reasons:
      170 +// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
      171 +// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
      172 +
      173 +// The following closure type is defined in g1_specialized_oop_closures.hpp:
      174 +//
      175 +// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
      176 +
      177 +// We use a separate closure to handle references during evacuation
      178 +// failure processing.
      179 +// We could have used another instance of G1ParScanHeapEvacClosure
      180 +// (since that closure no longer assumes that the references it
      181 +// handles point into the collection set).
      182 +
 147  183  typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 148  184  
 149  185  class FilterIntoCSClosure: public OopClosure {
 150  186    G1CollectedHeap* _g1;
 151  187    OopClosure* _oc;
 152  188    DirtyCardToOopClosure* _dcto_cl;
 153  189  public:
 154  190    FilterIntoCSClosure(  DirtyCardToOopClosure* dcto_cl,
 155      -                        G1CollectedHeap* g1, OopClosure* oc) :
      191 +                        G1CollectedHeap* g1,
      192 +                        OopClosure* oc,
      193 +                        ReferenceProcessor* rp) :
 156  194      _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
 157      -  {}
      195 +  {
      196 +    assert(_ref_processor == NULL, "sanity");
      197 +    _ref_processor = rp;
      198 +  }
      199 +
 158  200    template <class T> void do_oop_nv(T* p);
 159  201    virtual void do_oop(oop* p)        { do_oop_nv(p); }
 160  202    virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
 161  203    bool apply_to_weak_ref_discovered_field() { return true; }
 162  204    bool do_header() { return false; }
 163  205  };
 164  206  
 165  207  class FilterOutOfRegionClosure: public OopClosure {
 166  208    HeapWord* _r_bottom;
 167  209    HeapWord* _r_end;
... 25 lines elided ...
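
For reviewers reading the new constructor signatures out of context, here is a minimal, hypothetical caller sketch; it is not part of this change. It assumes a G1CollectedHeap* _g1h and a G1ParScanThreadState pss are in scope, and that the heap's ReferenceProcessor is reachable through a ref_processor() accessor; all local names are illustrative only.

  // Hypothetical usage (names assumed, not taken from this webrev): the updated
  // constructors accept a ReferenceProcessor* and record it in _ref_processor
  // (after asserting it was previously NULL), as shown in the diff above.
  ReferenceProcessor* rp = _g1h->ref_processor();       // assumed accessor on the heap
  G1ParScanClosure        scan_cl(_g1h, &pss, rp);      // ctor asserts, then sets _ref_processor = rp
  G1ParPushHeapRSClosure  push_cl(_g1h, &pss, rp);      // same pattern as G1ParScanClosure
  G1ParScanExtRootClosure root_cl(_g1h, &pss, rp);      // typedef of G1ParCopyClosure<false, G1BarrierNone, false>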