/*
 * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP

#include "memory/genOopClosures.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN                                      \
  void do_oop(oop obj);                                       \
  template <class T> inline void do_oop_work(T* p) {          \
    T heap_oop = oopDesc::load_heap_oop(p);                   \
    if (!oopDesc::is_null(heap_oop)) {                        \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);  \
      do_oop(obj);                                            \
    }                                                         \
  }
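
// Illustrative sketch only (FooClosure is hypothetical, not part of this
// file): a subclass declares DO_OOP_WORK_DEFN in a protected section and
// then funnels both the compressed and uncompressed cases through the
// shared decoding template:
//
//   class FooClosure: public CMSOopClosure {
//    protected:
//     DO_OOP_WORK_DEFN   // declares do_oop(oop) and do_oop_work<T>(T* p)
//    public:
//     virtual void do_oop(oop* p)       { do_oop_work(p); }
//     virtual void do_oop(narrowOop* p) { do_oop_work(p); }
//   };
//
// do_oop_work() loads the (possibly narrow) heap oop from *p, skips nulls,
// decodes it to a full oop, and dispatches to the subclass's do_oop(oop).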

// Applies the given oop closure to all oops in all klasses visited.
class CMKlassClosure : public KlassClosure {
  friend class CMSOopClosure;
  friend class CMSOopsInGenClosure;

  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }
 public:
  CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { }

  void do_klass(Klass* k);
};

// The base class for all CMS marking closures. It proxies through the
// klass metadata of visited objects to reach the oops embedded in that
// metadata.
class CMSOopClosure: public ExtendedOopClosure {
  CMKlassClosure      _klass_closure;
 public:
  CMSOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
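
// Illustrative sketch only; the real definitions live in the corresponding
// .cpp file. The proxying is expected to work roughly as follows: do_klass()
// forwards to the klass's class loader data, whose oops are then walked
// with the embedded CMKlassClosure, which in turn applies this closure to
// each oop it visits:
//
//   void CMSOopClosure::do_klass(Klass* k) { do_klass_nv(k); }
//   void CMSOopClosure::do_klass_nv(Klass* k) {
//     ClassLoaderData* cld = k->class_loader_data();
//     do_class_loader_data(cld);
//   }
//   void CMSOopClosure::do_class_loader_data(ClassLoaderData* cld) {
//     assert(_klass_closure._oop_closure == this, "Must be");
//     bool claim = true;  // Claim the class loader data before processing.
//     cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
//   }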

// TODO: This duplication of the CMSOopClosure class is only needed because
//       some CMS OopClosures derive from OopsInGenClosure. It would be good
//       to get rid of that dependency completely.
class CMSOopsInGenClosure: public OopsInGenClosure {
  CMKlassClosure _klass_closure;
 public:
  CMSOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};

class MarkRefsIntoClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
};

// In the parallel case, the bit map and the reference processor are
// both shared. Access to these shared mutable structures must use
// appropriate synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
};
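
// Illustrative sketch only (par_set_bit is a hypothetical helper, not this
// file's API): a shared mark bit map is typically updated with an atomic
// test-and-set, so that two workers racing on the same bit map word cannot
// lose each other's marks:
//
//   bool par_set_bit(volatile intptr_t* word_addr, intptr_t bit_mask) {
//     intptr_t old_val = *word_addr;
//     while ((old_val & bit_mask) == 0) {
//       intptr_t new_val = old_val | bit_mask;
//       intptr_t cur_val = Atomic::cmpxchg_ptr(new_val, word_addr, old_val);
//       if (cur_val == old_val) return true;  // we set the bit
//       old_val = cur_val;                    // lost the race; re-examine
//     }
//     return false;  // bit was already set by another worker
//   }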

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
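
// Illustrative sketch only (the real definition lives outside this header):
// the yield check is expected to yield only when the CMS thread has been
// asked to yield, the foreground collector is not already active, and
// yielding was requested at construction time (_yield), roughly:
//
//   inline void MarkRefsIntoAndScanClosure::do_yield_check() {
//     if (ConcurrentMarkSweepThread::should_yield() &&
//         !_collector->foregroundGCIsActive() &&
//         _yield) {
//       do_yield_work();
//     }
//   }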

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;
  OopTaskQueue*          _work_queue;
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

  void trim_queue(uint size);
};
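
// Illustrative sketch only (details live in the .cpp file): trim_queue()
// is expected to drain the local work queue down to the given size,
// applying the embedded Par_PushAndMarkClosure to each popped object so
// the queue stays short enough for other workers to steal from, roughly:
//
//   void Par_MarkRefsIntoAndScanClosure::trim_queue(uint size) {
//     while (_work_queue->size() > size) {
//       oop new_oop;
//       if (_work_queue->pop_local(new_oop)) {
//         assert(new_oop->is_oop(), "Expected an oop");
//         new_oop->oop_iterate(&_par_pushAndMarkClosure);
//       }
//     }
//   }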

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  HeapWord* const _finger;
  MarkFromRootsClosure* const
                  _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
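
// Illustrative sketch only (hypothetical shape; the real definition lives
// in the .cpp file): on overflow, the lowest discarded grey address is
// recorded with the collector so that marking can later be restarted from
// it, and the stack is grown if possible:
//
//   void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
//     _collector->lower_restart_addr(lost);  // remember least grey address lost
//     _markStack->reset();                   // discard (now stale) contents
//     _markStack->expand();                  // grow the stack if possible
//   }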

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public CMSOopClosure {
 private:
  CMSCollector*    _collector;
  MemRegion        _whole_span;
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;
  OopTaskQueue*    _work_queue;
  CMSMarkStack*    _overflow_stack;
  HeapWord*  const _finger;
  HeapWord** const _global_finger_addr;
  Par_MarkFromRootsClosure* const
                   _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in the CMS generation, this closure marks the given
// objects (and, transitively, the objects reachable from them) as live.
// It is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  const MemRegion _span;
  CMSMarkStack* _mark_stack;
  CMSBitMap*    _bit_map;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};

class CMSInnerParMarkAndPushClosure: public CMSOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};

// A parallel (MT) version of CMSKeepAliveClosure above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public CMSOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP