< prev index next >

src/hotspot/share/gc/cms/cmsOopClosures.hpp

Print this page




  27 
  28 #include "gc/shared/genOopClosures.hpp"
  29 #include "gc/shared/taskqueue.hpp"
  30 #include "memory/iterator.hpp"
  31 
  32 /////////////////////////////////////////////////////////////////
  33 // Closures used by ConcurrentMarkSweepGeneration's collector
  34 /////////////////////////////////////////////////////////////////
  35 class ConcurrentMarkSweepGeneration;
  36 class CMSBitMap;
  37 class CMSMarkStack;
  38 class CMSCollector;
  39 class MarkFromRootsClosure;
  40 class ParMarkFromRootsClosure;
  41 
  42 // Decode the oop and call do_oop on it.
  43 #define DO_OOP_WORK_DEFN                             \
  44   void do_oop(oop obj);                              \
  45   template <class T> inline void do_oop_work(T* p);
     // DO_OOP_WORK_DEFN (above) is expanded in the protected section of each
     // closure class below. It declares the do_oop(oop) entry point plus a
     // templatized do_oop_work<T> helper shared by the oop* and narrowOop*
     // overloads; the bodies are defined out of line (presumably in
     // cmsOopClosures.inline.hpp / the CMS .cpp files -- not visible here).
  46 
  47 // TODO: This duplication of the MetadataAwareOopClosure class is only needed
  48 //       because some CMS OopClosures derive from OopsInGenClosure. It would be
  49 //       good to get rid of them completely.
  50 class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
     // Metadata-processing mixin for CMS closures that must derive from
     // OopsInGenClosure (see TODO above). do_metadata() is hard-wired to true,
     // so closures built on this base always visit klass/CLD metadata.
     // The *_nv ("non-virtual") twins support the old devirtualized dispatch
     // scheme -- presumably selected via the OopClosure specialization macros;
     // verify against genOopClosures / specialized_oop_closures definitions.
  51  public:
  52   virtual bool do_metadata()    { return do_metadata_nv(); }
  53   inline  bool do_metadata_nv() { return true; }
  54 
  55   virtual void do_klass(Klass* k);
  56   void do_klass_nv(Klass* k);
  57 
     // do_cld forwards to the non-virtual do_cld_nv; both bodies are out of line
     // except this virtual wrapper.
  58   virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
  59   void do_cld_nv(ClassLoaderData* cld);
  60 };
  61 
  62 class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
     // Records references into the CMS generation: presumably each oop found
     // within _span gets its corresponding bit set in _bitMap (do_oop bodies
     // are defined out of line -- confirm in the CMS implementation files).
  63  private:
  64   const MemRegion _span;
  65   CMSBitMap*      _bitMap;
  66  protected:
  67   DO_OOP_WORK_DEFN
  68  public:
  69   MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  70   virtual void do_oop(oop* p);
  71   virtual void do_oop(narrowOop* p);
  72 };
  73 
  74 class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
     // Parallel (MT) counterpart of MarkRefsIntoClosure. NOTE(review): the
     // declaration is identical to the serial one -- the difference must live
     // in the out-of-line do_oop, presumably using atomic bitmap updates;
     // confirm in the implementation.
  75  private:
  76   const MemRegion _span;
  77   CMSBitMap*      _bitMap;
  78  protected:
  79   DO_OOP_WORK_DEFN
  80  public:
  81   ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  82   virtual void do_oop(oop* p);
  83   virtual void do_oop(narrowOop* p);
  84 };
  85 
  86 // A variant of the above used in certain kinds of CMS
  87 // marking verification.
  88 class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
     // Verification variant: carries both the verification bitmap being built
     // (_verification_bm) and the collector's own bitmap (_cms_bm), presumably
     // so marks can be cross-checked between the two -- confirm in the
     // out-of-line do_oop.
  89  private:
  90   const MemRegion _span;
  91   CMSBitMap*      _verification_bm;
  92   CMSBitMap*      _cms_bm;
  93  protected:
  94   DO_OOP_WORK_DEFN
  95  public:
  96   MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
  97                             CMSBitMap* cms_bm);
  98   virtual void do_oop(oop* p);
  99   virtual void do_oop(narrowOop* p);
 100 };
 101 
 102 // The non-parallel version (the parallel version appears further below).
 103 class PushAndMarkClosure: public MetadataAwareOopClosure {
     // Serial closure that (per its name) marks a discovered object and pushes
     // it for later scanning on _mark_stack. Also carries the mod-union table
     // and a _concurrent_precleaning flag, so behavior presumably differs
     // between the precleaning and remark phases -- confirm in do_oop_work.
 104  private:
 105   CMSCollector* _collector;
 106   MemRegion     _span;
 107   CMSBitMap*    _bit_map;
 108   CMSBitMap*    _mod_union_table;
 109   CMSMarkStack* _mark_stack;
 110   bool          _concurrent_precleaning;
 111  protected:
 112   DO_OOP_WORK_DEFN
 113  public:
 114   PushAndMarkClosure(CMSCollector* collector,
 115                      MemRegion span,
 116                      ReferenceDiscoverer* rd,
 117                      CMSBitMap* bit_map,
 118                      CMSBitMap* mod_union_table,
 119                      CMSMarkStack* mark_stack,
 120                      bool concurrent_precleaning);
 121   virtual void do_oop(oop* p);
 122   virtual void do_oop(narrowOop* p);
     // Non-virtual overloads for the old devirtualized dispatch scheme.
 123   inline void do_oop_nv(oop* p);
 124   inline void do_oop_nv(narrowOop* p);
 125 };
 126 
 127 // In the parallel case, the bit map and the
 128 // reference processor are currently all shared. Access to
 129 // these shared mutable structures must use appropriate
 130 // synchronization (for instance, via CAS). The marking stack
 131 // used in the non-parallel case above is here replaced with
 132 // an OopTaskQueue structure to allow efficient work stealing.
 133 class ParPushAndMarkClosure: public MetadataAwareOopClosure {
     // Parallel counterpart of PushAndMarkClosure: the CMSMarkStack is replaced
     // by a per-worker OopTaskQueue (see comment above). The shared bitmap
     // presumably requires synchronized (e.g. CAS) updates in the
     // implementation -- see the file-level comment.
 134  private:
 135   CMSCollector* _collector;
 136   MemRegion     _span;
 137   CMSBitMap*    _bit_map;
 138   OopTaskQueue* _work_queue;
 139  protected:
 140   DO_OOP_WORK_DEFN
 141  public:
 142   ParPushAndMarkClosure(CMSCollector* collector,
 143                         MemRegion span,
 144                         ReferenceDiscoverer* rd,
 145                         CMSBitMap* bit_map,
 146                         OopTaskQueue* work_queue);
 147   virtual void do_oop(oop* p);
 148   virtual void do_oop(narrowOop* p);
     // Non-virtual overloads for the old devirtualized dispatch scheme.
 149   inline void do_oop_nv(oop* p);
 150   inline void do_oop_nv(narrowOop* p);
 151 };
 152 
 153 // The non-parallel version (the parallel version appears further below).
 154 class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
     // Serial mark-and-scan driver: embeds a PushAndMarkClosure by value
     // (_pushAndMarkClosure) for the transitive marking work. Supports
     // cooperative yielding (_yield / do_yield_check) and takes the free-list
     // lock via set_freelistLock -- the lock is installed after construction,
     // presumably because it is not known at closure-creation time; confirm
     // at the call sites.
 155  private:
 156   MemRegion          _span;
 157   CMSBitMap*         _bit_map;
 158   CMSMarkStack*      _mark_stack;
 159   PushAndMarkClosure _pushAndMarkClosure;
 160   CMSCollector*      _collector;
 161   Mutex*             _freelistLock;
 162   bool               _yield;
 163   // Whether closure is being used for concurrent precleaning
 164   bool               _concurrent_precleaning;
 165  protected:
 166   DO_OOP_WORK_DEFN
 167  public:
 168   MarkRefsIntoAndScanClosure(MemRegion span,
 169                              ReferenceDiscoverer* rd,
 170                              CMSBitMap* bit_map,
 171                              CMSBitMap* mod_union_table,
 172                              CMSMarkStack* mark_stack,
 173                              CMSCollector* collector,
 174                              bool should_yield,
 175                              bool concurrent_precleaning);
 176   virtual void do_oop(oop* p);
 177   virtual void do_oop(narrowOop* p);
 178   inline void do_oop_nv(oop* p);
 179   inline void do_oop_nv(narrowOop* p);
 180 
     // Installs the free-list lock used during yields; see note above.
 181   void set_freelistLock(Mutex* m) {
 182     _freelistLock = m;
 183   }
 184 
 185  private:
 186   inline void do_yield_check();
 187   void do_yield_work();
 188   bool take_from_overflow_list();
 189 };
 190 
 191 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
 192 // stack and the bitMap are shared, so access needs to be suitably
 193 // synchronized. An OopTaskQueue structure, supporting efficient
 194 // work stealing, replaces a CMSMarkStack for storing grey objects.
 195 class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
     // Parallel mark-and-scan driver (see comment above): embeds a
     // ParPushAndMarkClosure and drains its OopTaskQueue via trim_queue.
     // _low_water_mark presumably bounds how far trim_queue drains so work
     // remains available for stealing -- confirm in the implementation.
 196  private:
 197   MemRegion             _span;
 198   CMSBitMap*            _bit_map;
 199   OopTaskQueue*         _work_queue;
 200   const uint            _low_water_mark;
 201   ParPushAndMarkClosure _parPushAndMarkClosure;
 202  protected:
 203   DO_OOP_WORK_DEFN
 204  public:
 205   ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
 206                                  MemRegion span,
 207                                  ReferenceDiscoverer* rd,
 208                                  CMSBitMap* bit_map,
 209                                  OopTaskQueue* work_queue);
 210   virtual void do_oop(oop* p);
 211   virtual void do_oop(narrowOop* p);
 212   inline void do_oop_nv(oop* p);
 213   inline void do_oop_nv(narrowOop* p);
 214 
 215   void trim_queue(uint size);
 216 };
 217 
 218 // This closure is used during the concurrent marking phase
 219 // following the first checkpoint. Its use is buried in
 220 // the closure MarkFromRootsClosure.
 221 class PushOrMarkClosure: public MetadataAwareOopClosure {
     // Used during concurrent marking from MarkFromRootsClosure (its _parent).
     // _finger is the parent's current scan position; presumably objects below
     // the finger are marked directly while those above are pushed on
     // _markStack -- confirm in do_oop_work. Stack overflow is handled
     // explicitly via handle_stack_overflow.
 222  private:
 223   CMSCollector*   _collector;
 224   MemRegion       _span;
 225   CMSBitMap*      _bitMap;
 226   CMSMarkStack*   _markStack;
 227   HeapWord* const _finger;
 228   MarkFromRootsClosure* const
 229                   _parent;
 230  protected:
 231   DO_OOP_WORK_DEFN
 232  public:
 233   PushOrMarkClosure(CMSCollector* cms_collector,
 234                     MemRegion span,
 235                     CMSBitMap* bitMap,
 236                     CMSMarkStack* markStack,
 237                     HeapWord* finger,
 238                     MarkFromRootsClosure* parent);
 239   virtual void do_oop(oop* p);
 240   virtual void do_oop(narrowOop* p);
 241   inline void do_oop_nv(oop* p);
 242   inline void do_oop_nv(narrowOop* p);
 243 
 244   // Deal with a stack overflow condition
 245   void handle_stack_overflow(HeapWord* lost);
 246  private:
 247   inline void do_yield_check();
 248 };
 249 
 250 // A parallel (MT) version of the above.
 251 // This closure is used during the concurrent marking phase
 252 // following the first checkpoint. Its use is buried in
 253 // the closure ParMarkFromRootsClosure.
 254 class ParPushOrMarkClosure: public MetadataAwareOopClosure {
     // Parallel counterpart of PushOrMarkClosure (parent is
     // ParMarkFromRootsClosure). Tracks both the worker's local chunk (_span)
     // and the whole region (_whole_span), plus a shared global finger address
     // (volatile, updated by all workers) alongside the local _finger.
     // Overflow from the work queue spills to the shared _overflow_stack.
 255  private:
 256   CMSCollector*                  _collector;
 257   MemRegion                      _whole_span;
 258   MemRegion                      _span;       // local chunk
 259   CMSBitMap*                     _bit_map;
 260   OopTaskQueue*                  _work_queue;
 261   CMSMarkStack*                  _overflow_stack;
 262   HeapWord*  const               _finger;
 263   HeapWord* volatile* const      _global_finger_addr;
 264   ParMarkFromRootsClosure* const _parent;
 265  protected:
 266   DO_OOP_WORK_DEFN
 267  public:
 268   ParPushOrMarkClosure(CMSCollector* cms_collector,
 269                        MemRegion span,
 270                        CMSBitMap* bit_map,
 271                        OopTaskQueue* work_queue,
 272                        CMSMarkStack* mark_stack,
 273                        HeapWord* finger,
 274                        HeapWord* volatile* global_finger_addr,
 275                        ParMarkFromRootsClosure* parent);
 276   virtual void do_oop(oop* p);
 277   virtual void do_oop(narrowOop* p);
 278   inline void do_oop_nv(oop* p);
 279   inline void do_oop_nv(narrowOop* p);
 280 
 281   // Deal with a stack overflow condition
 282   void handle_stack_overflow(HeapWord* lost);
 283  private:
 284   inline void do_yield_check();
 285 };
 286 
 287 // For objects in CMS generation, this closure marks
 288 // given objects (transitively) as being reachable/live.
 289 // This is currently used during the (weak) reference object
 290 // processing phase of the CMS final checkpoint step, as
 291 // well as during the concurrent precleaning of the discovered
 292 // reference lists.
 293 class CMSKeepAliveClosure: public MetadataAwareOopClosure {
     // Keep-alive closure for reference processing (see comment above).
     // The cpc constructor argument initializes _concurrent_precleaning,
     // exposed read-only via concurrent_precleaning().
 294  private:
 295   CMSCollector* _collector;
 296   const MemRegion _span;
 297   CMSMarkStack* _mark_stack;
 298   CMSBitMap*    _bit_map;
 299   bool          _concurrent_precleaning;
 300  protected:
 301   DO_OOP_WORK_DEFN
 302  public:
 303   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
 304                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
 305                       bool cpc);
 306   bool    concurrent_precleaning() const { return _concurrent_precleaning; }
 307   virtual void do_oop(oop* p);
 308   virtual void do_oop(narrowOop* p);
 309   inline void do_oop_nv(oop* p);
 310   inline void do_oop_nv(narrowOop* p);
 311 };
 312 
 313 class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
     // Inner worker closure embedded (by value) in CMSParKeepAliveClosure
     // below as _mark_and_push; presumably marks in the shared _bit_map and
     // pushes onto the worker's _work_queue -- confirm in do_oop_work.
 314  private:
 315   CMSCollector* _collector;
 316   MemRegion     _span;
 317   OopTaskQueue* _work_queue;
 318   CMSBitMap*    _bit_map;
 319  protected:
 320   DO_OOP_WORK_DEFN
 321  public:
 322   CMSInnerParMarkAndPushClosure(CMSCollector* collector,
 323                                 MemRegion span, CMSBitMap* bit_map,
 324                                 OopTaskQueue* work_queue);
 325   virtual void do_oop(oop* p);
 326   virtual void do_oop(narrowOop* p);
 327   inline void do_oop_nv(oop* p);
 328   inline void do_oop_nv(narrowOop* p);
 329 };
 330 
 331 // A parallel (MT) version of the above, used when
 332 // reference processing is parallel; the only difference
 333 // is in the do_oop method.
 334 class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
     // Parallel keep-alive closure for MT reference processing (see comment
     // above); delegates the actual mark-and-push work to the embedded
     // CMSInnerParMarkAndPushClosure. trim_queue / _low_water_mark manage the
     // worker's task queue -- presumably draining down to the low-water mark
     // to leave work stealable; confirm in the implementation.
 335  private:
 336   MemRegion     _span;
 337   OopTaskQueue* _work_queue;
 338   CMSBitMap*    _bit_map;
 339   CMSInnerParMarkAndPushClosure
 340                 _mark_and_push;
 341   const uint    _low_water_mark;
 342   void trim_queue(uint max);
 343  protected:
 344   DO_OOP_WORK_DEFN
 345  public:
 346   CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
 347                          CMSBitMap* bit_map, OopTaskQueue* work_queue);
 348   virtual void do_oop(oop* p);
 349   virtual void do_oop(narrowOop* p);
 350 };
 351 
 352 #endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP


  27 
  28 #include "gc/shared/genOopClosures.hpp"
  29 #include "gc/shared/taskqueue.hpp"
  30 #include "memory/iterator.hpp"
  31 
  32 /////////////////////////////////////////////////////////////////
  33 // Closures used by ConcurrentMarkSweepGeneration's collector
  34 /////////////////////////////////////////////////////////////////
  35 class ConcurrentMarkSweepGeneration;
  36 class CMSBitMap;
  37 class CMSMarkStack;
  38 class CMSCollector;
  39 class MarkFromRootsClosure;
  40 class ParMarkFromRootsClosure;
  41 
  42 // Decode the oop and call do_oop on it.
  43 #define DO_OOP_WORK_DEFN                             \
  44   void do_oop(oop obj);                              \
  45   template <class T> inline void do_oop_work(T* p);
     // DO_OOP_WORK_DEFN (above) is expanded in the protected section of each
     // closure class below; the do_oop_work<T> bodies are defined out of line
     // (presumably in cmsOopClosures.inline.hpp -- not visible here).
  46 
  47 // TODO: This duplication of the MetadataVisitingOopIterateClosure class is only needed
  48 //       because some CMS OopClosures derive from OopsInGenClosure. It would be
  49 //       good to get rid of them completely.
  50 class MetadataVisitingOopsInGenClosure: public OopsInGenClosure {
     // New-scheme replacement for MetadataAwareOopsInGenClosure: the *_nv
     // twins of the old devirtualized dispatch are gone; dispatch is purely
     // virtual. do_metadata() is hard-wired to true, so closures built on this
     // base always visit klass/CLD metadata.
  51  public:
  52   virtual bool do_metadata() { return true; }


  53   virtual void do_klass(Klass* k);
  54   virtual void do_cld(ClassLoaderData* cld);



  55 };
  56 
  57 class MarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
     // Records references into the CMS generation: presumably each oop found
     // within _span gets its corresponding bit set in _bitMap (do_oop bodies
     // are defined out of line -- confirm in the CMS implementation files).
  58  private:
  59   const MemRegion _span;
  60   CMSBitMap*      _bitMap;
  61  protected:
  62   DO_OOP_WORK_DEFN
  63  public:
  64   MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  65   virtual void do_oop(oop* p);
  66   virtual void do_oop(narrowOop* p);
  67 };
  68 
  69 class ParMarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
     // Parallel (MT) counterpart of MarkRefsIntoClosure. NOTE(review): the
     // declaration is identical to the serial one -- the difference must live
     // in the out-of-line do_oop, presumably using atomic bitmap updates;
     // confirm in the implementation.
  70  private:
  71   const MemRegion _span;
  72   CMSBitMap*      _bitMap;
  73  protected:
  74   DO_OOP_WORK_DEFN
  75  public:
  76   ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  77   virtual void do_oop(oop* p);
  78   virtual void do_oop(narrowOop* p);
  79 };
  80 
  81 // A variant of the above used in certain kinds of CMS
  82 // marking verification.
  83 class MarkRefsIntoVerifyClosure: public MetadataVisitingOopsInGenClosure {
     // Verification variant: carries both the verification bitmap being built
     // (_verification_bm) and the collector's own bitmap (_cms_bm), presumably
     // so marks can be cross-checked between the two -- confirm in the
     // out-of-line do_oop.
  84  private:
  85   const MemRegion _span;
  86   CMSBitMap*      _verification_bm;
  87   CMSBitMap*      _cms_bm;
  88  protected:
  89   DO_OOP_WORK_DEFN
  90  public:
  91   MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
  92                             CMSBitMap* cms_bm);
  93   virtual void do_oop(oop* p);
  94   virtual void do_oop(narrowOop* p);
  95 };
  96 
  97 // The non-parallel version (the parallel version appears further below).
  98 class PushAndMarkClosure: public MetadataVisitingOopIterateClosure {
     // Serial closure that (per its name) marks a discovered object and pushes
     // it for later scanning on _mark_stack. Also carries the mod-union table
     // and a _concurrent_precleaning flag, so behavior presumably differs
     // between the precleaning and remark phases -- confirm in do_oop_work.
     // (New scheme: the do_oop_nv overloads of the old dispatch are gone.)
  99  private:
 100   CMSCollector* _collector;
 101   MemRegion     _span;
 102   CMSBitMap*    _bit_map;
 103   CMSBitMap*    _mod_union_table;
 104   CMSMarkStack* _mark_stack;
 105   bool          _concurrent_precleaning;
 106  protected:
 107   DO_OOP_WORK_DEFN
 108  public:
 109   PushAndMarkClosure(CMSCollector* collector,
 110                      MemRegion span,
 111                      ReferenceDiscoverer* rd,
 112                      CMSBitMap* bit_map,
 113                      CMSBitMap* mod_union_table,
 114                      CMSMarkStack* mark_stack,
 115                      bool concurrent_precleaning);
 116   virtual void do_oop(oop* p);
 117   virtual void do_oop(narrowOop* p);


 118 };
 119 
 120 // In the parallel case, the bit map and the
 121 // reference processor are currently all shared. Access to
 122 // these shared mutable structures must use appropriate
 123 // synchronization (for instance, via CAS). The marking stack
 124 // used in the non-parallel case above is here replaced with
 125 // an OopTaskQueue structure to allow efficient work stealing.
 126 class ParPushAndMarkClosure: public MetadataVisitingOopIterateClosure {
     // Parallel counterpart of PushAndMarkClosure: the CMSMarkStack is replaced
     // by a per-worker OopTaskQueue (see comment above). The shared bitmap
     // presumably requires synchronized (e.g. CAS) updates in the
     // implementation -- see the file-level comment.
 127  private:
 128   CMSCollector* _collector;
 129   MemRegion     _span;
 130   CMSBitMap*    _bit_map;
 131   OopTaskQueue* _work_queue;
 132  protected:
 133   DO_OOP_WORK_DEFN
 134  public:
 135   ParPushAndMarkClosure(CMSCollector* collector,
 136                         MemRegion span,
 137                         ReferenceDiscoverer* rd,
 138                         CMSBitMap* bit_map,
 139                         OopTaskQueue* work_queue);
 140   virtual void do_oop(oop* p);
 141   virtual void do_oop(narrowOop* p);


 142 };
 143 
 144 // The non-parallel version (the parallel version appears further below).
 145 class MarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
     // Serial mark-and-scan driver: embeds a PushAndMarkClosure by value
     // (_pushAndMarkClosure) for the transitive marking work. Supports
     // cooperative yielding (_yield / do_yield_check) and takes the free-list
     // lock via set_freelistLock -- the lock is installed after construction,
     // presumably because it is not known at closure-creation time; confirm
     // at the call sites.
 146  private:
 147   MemRegion          _span;
 148   CMSBitMap*         _bit_map;
 149   CMSMarkStack*      _mark_stack;
 150   PushAndMarkClosure _pushAndMarkClosure;
 151   CMSCollector*      _collector;
 152   Mutex*             _freelistLock;
 153   bool               _yield;
 154   // Whether closure is being used for concurrent precleaning
 155   bool               _concurrent_precleaning;
 156  protected:
 157   DO_OOP_WORK_DEFN
 158  public:
 159   MarkRefsIntoAndScanClosure(MemRegion span,
 160                              ReferenceDiscoverer* rd,
 161                              CMSBitMap* bit_map,
 162                              CMSBitMap* mod_union_table,
 163                              CMSMarkStack* mark_stack,
 164                              CMSCollector* collector,
 165                              bool should_yield,
 166                              bool concurrent_precleaning);
 167   virtual void do_oop(oop* p);
 168   virtual void do_oop(narrowOop* p);


 169 
     // Installs the free-list lock used during yields; see note above.
 170   void set_freelistLock(Mutex* m) {
 171     _freelistLock = m;
 172   }
 173 
 174  private:
 175   inline void do_yield_check();
 176   void do_yield_work();
 177   bool take_from_overflow_list();
 178 };
 179 
 180 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
 181 // stack and the bitMap are shared, so access needs to be suitably
 182 // synchronized. An OopTaskQueue structure, supporting efficient
 183 // work stealing, replaces a CMSMarkStack for storing grey objects.
 184 class ParMarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
     // Parallel mark-and-scan driver (see comment above): embeds a
     // ParPushAndMarkClosure and drains its OopTaskQueue via trim_queue.
     // _low_water_mark presumably bounds how far trim_queue drains so work
     // remains available for stealing -- confirm in the implementation.
 185  private:
 186   MemRegion             _span;
 187   CMSBitMap*            _bit_map;
 188   OopTaskQueue*         _work_queue;
 189   const uint            _low_water_mark;
 190   ParPushAndMarkClosure _parPushAndMarkClosure;
 191  protected:
 192   DO_OOP_WORK_DEFN
 193  public:
 194   ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
 195                                  MemRegion span,
 196                                  ReferenceDiscoverer* rd,
 197                                  CMSBitMap* bit_map,
 198                                  OopTaskQueue* work_queue);
 199   virtual void do_oop(oop* p);
 200   virtual void do_oop(narrowOop* p);


 201 
 202   void trim_queue(uint size);
 203 };
 204 
 205 // This closure is used during the concurrent marking phase
 206 // following the first checkpoint. Its use is buried in
 207 // the closure MarkFromRootsClosure.
 208 class PushOrMarkClosure: public MetadataVisitingOopIterateClosure {
     // Used during concurrent marking from MarkFromRootsClosure (its _parent).
     // _finger is the parent's current scan position; presumably objects below
     // the finger are marked directly while those above are pushed on
     // _markStack -- confirm in do_oop_work. Stack overflow is handled
     // explicitly via handle_stack_overflow.
 209  private:
 210   CMSCollector*   _collector;
 211   MemRegion       _span;
 212   CMSBitMap*      _bitMap;
 213   CMSMarkStack*   _markStack;
 214   HeapWord* const _finger;
 215   MarkFromRootsClosure* const
 216                   _parent;
 217  protected:
 218   DO_OOP_WORK_DEFN
 219  public:
 220   PushOrMarkClosure(CMSCollector* cms_collector,
 221                     MemRegion span,
 222                     CMSBitMap* bitMap,
 223                     CMSMarkStack* markStack,
 224                     HeapWord* finger,
 225                     MarkFromRootsClosure* parent);
 226   virtual void do_oop(oop* p);
 227   virtual void do_oop(narrowOop* p);


 228 
 229   // Deal with a stack overflow condition
 230   void handle_stack_overflow(HeapWord* lost);
 231  private:
 232   inline void do_yield_check();
 233 };
 234 
 235 // A parallel (MT) version of the above.
 236 // This closure is used during the concurrent marking phase
 237 // following the first checkpoint. Its use is buried in
 238 // the closure ParMarkFromRootsClosure.
 239 class ParPushOrMarkClosure: public MetadataVisitingOopIterateClosure {
     // Parallel counterpart of PushOrMarkClosure (parent is
     // ParMarkFromRootsClosure). Tracks both the worker's local chunk (_span)
     // and the whole region (_whole_span), plus a shared global finger address
     // (volatile, updated by all workers) alongside the local _finger.
     // Overflow from the work queue spills to the shared _overflow_stack.
 240  private:
 241   CMSCollector*                  _collector;
 242   MemRegion                      _whole_span;
 243   MemRegion                      _span;       // local chunk
 244   CMSBitMap*                     _bit_map;
 245   OopTaskQueue*                  _work_queue;
 246   CMSMarkStack*                  _overflow_stack;
 247   HeapWord*  const               _finger;
 248   HeapWord* volatile* const      _global_finger_addr;
 249   ParMarkFromRootsClosure* const _parent;
 250  protected:
 251   DO_OOP_WORK_DEFN
 252  public:
 253   ParPushOrMarkClosure(CMSCollector* cms_collector,
 254                        MemRegion span,
 255                        CMSBitMap* bit_map,
 256                        OopTaskQueue* work_queue,
 257                        CMSMarkStack* mark_stack,
 258                        HeapWord* finger,
 259                        HeapWord* volatile* global_finger_addr,
 260                        ParMarkFromRootsClosure* parent);
 261   virtual void do_oop(oop* p);
 262   virtual void do_oop(narrowOop* p);


 263 
 264   // Deal with a stack overflow condition
 265   void handle_stack_overflow(HeapWord* lost);
 266  private:
 267   inline void do_yield_check();
 268 };
 269 
 270 // For objects in CMS generation, this closure marks
 271 // given objects (transitively) as being reachable/live.
 272 // This is currently used during the (weak) reference object
 273 // processing phase of the CMS final checkpoint step, as
 274 // well as during the concurrent precleaning of the discovered
 275 // reference lists.
 276 class CMSKeepAliveClosure: public MetadataVisitingOopIterateClosure {
     // Keep-alive closure for reference processing (see comment above).
     // The cpc constructor argument initializes _concurrent_precleaning,
     // exposed read-only via concurrent_precleaning().
 277  private:
 278   CMSCollector* _collector;
 279   const MemRegion _span;
 280   CMSMarkStack* _mark_stack;
 281   CMSBitMap*    _bit_map;
 282   bool          _concurrent_precleaning;
 283  protected:
 284   DO_OOP_WORK_DEFN
 285  public:
 286   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
 287                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
 288                       bool cpc);
 289   bool    concurrent_precleaning() const { return _concurrent_precleaning; }
 290   virtual void do_oop(oop* p);
 291   virtual void do_oop(narrowOop* p);


 292 };
 293 
 294 class CMSInnerParMarkAndPushClosure: public MetadataVisitingOopIterateClosure {
     // Inner worker closure embedded (by value) in CMSParKeepAliveClosure
     // below as _mark_and_push; presumably marks in the shared _bit_map and
     // pushes onto the worker's _work_queue -- confirm in do_oop_work.
 295  private:
 296   CMSCollector* _collector;
 297   MemRegion     _span;
 298   OopTaskQueue* _work_queue;
 299   CMSBitMap*    _bit_map;
 300  protected:
 301   DO_OOP_WORK_DEFN
 302  public:
 303   CMSInnerParMarkAndPushClosure(CMSCollector* collector,
 304                                 MemRegion span, CMSBitMap* bit_map,
 305                                 OopTaskQueue* work_queue);
 306   virtual void do_oop(oop* p);
 307   virtual void do_oop(narrowOop* p);


 308 };
 309 
 310 // A parallel (MT) version of the above, used when
 311 // reference processing is parallel; the only difference
 312 // is in the do_oop method.
 313 class CMSParKeepAliveClosure: public MetadataVisitingOopIterateClosure {
     // Parallel keep-alive closure for MT reference processing (see comment
     // above); delegates the actual mark-and-push work to the embedded
     // CMSInnerParMarkAndPushClosure. trim_queue / _low_water_mark manage the
     // worker's task queue -- presumably draining down to the low-water mark
     // to leave work stealable; confirm in the implementation.
 314  private:
 315   MemRegion     _span;
 316   OopTaskQueue* _work_queue;
 317   CMSBitMap*    _bit_map;
 318   CMSInnerParMarkAndPushClosure
 319                 _mark_and_push;
 320   const uint    _low_water_mark;
 321   void trim_queue(uint max);
 322  protected:
 323   DO_OOP_WORK_DEFN
 324  public:
 325   CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
 326                          CMSBitMap* bit_map, OopTaskQueue* work_queue);
 327   virtual void do_oop(oop* p);
 328   virtual void do_oop(narrowOop* p);
 329 };
 330 
 331 #endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
< prev index next >