/*
 * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj);                                   \
  template <class T> inline void do_oop_work(T* p) {      \
    T heap_oop = oopDesc::load_heap_oop(p);               \
    if (!oopDesc::is_null(heap_oop)) {                    \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);       \
      do_oop(obj);                                        \
    }                                                     \
  }
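
// For illustration only: a closure declared with DO_OOP_WORK_DEFN handles
// wide (oop*) and compressed (narrowOop*) slots uniformly. Each virtual
// do_oop(T* p) forwards to do_oop_work(p), which decodes the slot and
// invokes the non-virtual do_oop(oop obj) only for non-NULL referents.
// A minimal sketch (CountLiveClosure is hypothetical, not part of CMS):
//
//   class CountLiveClosure: public OopClosure {
//    private:
//     size_t _count;
//    protected:
//     DO_OOP_WORK_DEFN      // declares do_oop(oop) and do_oop_work<T>()
//    public:
//     CountLiveClosure() : _count(0) { }
//     virtual void do_oop(oop* p)       { do_oop_work(p); }
//     virtual void do_oop(narrowOop* p) { do_oop_work(p); }
//     size_t count() const { return _count; }
//   };
//   void CountLiveClosure::do_oop(oop obj) { _count++; }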

// Marks oops within _span as live in _bitMap (the CMS marking bit map).
class MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};

// KlassRememberingOopClosure is used when marking of the permanent generation
// is being done.  It adds fields to support revisiting of klasses
// for class unloading.  _should_remember_klasses indicates whether klasses
// should be remembered; currently that is the case whenever CMS class
// unloading is turned on.  The _revisit_stack is used to save the klasses
// for later processing.
class KlassRememberingOopClosure : public OopClosure {
 protected:
  CMSCollector* _collector;
  CMSMarkStack* _revisit_stack;
  bool const    _should_remember_klasses;
 public:
  void check_remember_klasses() const PRODUCT_RETURN;
  virtual const bool should_remember_klasses() const {
    check_remember_klasses();
    return _should_remember_klasses;
  }
  virtual void remember_klass(Klass* k);

  KlassRememberingOopClosure(CMSCollector* collector,
                             ReferenceProcessor* rp,
                             CMSMarkStack* revisit_stack);
};
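
// The intended calling pattern, schematically (an illustrative sketch;
// the actual call sites are in the oop_oop_iterate() framework): given
// the Klass* k of an object being scanned, a caller does
//
//   if (closure->should_remember_klasses()) {
//     closure->remember_klass(k);   // saves k on _revisit_stack for the
//   }                               // class-unloading revisit pass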

// Similar to KlassRememberingOopClosure for use when multiple
// GC threads will execute the closure.
class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
 public:
  Par_KlassRememberingOopClosure(CMSCollector* collector,
                                 ReferenceProcessor* rp,
                                 CMSMarkStack* revisit_stack):
    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
  virtual void remember_klass(Klass* k);
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     CMSMarkStack* revisit_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};

// In the parallel case, the revisit stack, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};
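
// The marking step on the shared bit map is, schematically (an
// illustrative sketch, not the actual body of do_oop_work):
//
//   HeapWord* addr = (HeapWord*)obj;
//   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
//     if (_bit_map->par_mark(addr)) {  // atomic test-and-set of the mark bit
//       _work_queue->push(obj);        // newly grey: queue for later scanning
//     }                                // else: another thread marked it first
//   }
//
// A failed push (queue full) is dealt with via a shared overflow list in
// the real implementation.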

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSMarkStack* revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }
  virtual const bool should_remember_klasses() const {
    return _pushAndMarkClosure.should_remember_klasses();
  }
  virtual void remember_klass(Klass* k) {
    _pushAndMarkClosure.remember_klass(k);
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
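
// The yield protocol, schematically (an illustrative sketch based on the
// fields above): between objects, the closure checks whether the concurrent
// collector should step aside (e.g. for a scavenge), dropping and then
// reacquiring _freelistLock around the pause:
//
//   inline void MarkRefsIntoAndScanClosure::do_yield_check() {
//     if (ConcurrentMarkSweepThread::should_yield() &&
//         !_collector->foregroundGCIsActive() &&
//         _yield) {
//       do_yield_work();   // release locks, pause briefly, retake locks
//     }
//   }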

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;
  OopTaskQueue*          _work_queue;
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  bool do_header() { return true; }
  // When ScanMarkedObjectsAgainClosure is used, it passes
  // [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
  // which then relies on this delegation.
  virtual const bool should_remember_klasses() const {
    return _par_pushAndMarkClosure.should_remember_klasses();
  }
  // See comment on should_remember_klasses() above.
  virtual void remember_klass(Klass* k) {
    _par_pushAndMarkClosure.remember_klass(k);
  }
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void trim_queue(uint size);
};
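
// Work distribution follows the usual drain-and-steal pattern: trim_queue()
// pops and scans local entries until the queue length drops to the given
// bound, leaving entries available for other threads to steal. Schematically
// (an illustrative sketch, not the actual implementation):
//
//   void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
//     oop obj_to_scan;
//     while (_work_queue->size() > max &&
//            _work_queue->pop_local(obj_to_scan)) {
//       obj_to_scan->oop_iterate(&_par_pushAndMarkClosure);  // grey -> black
//     }
//   }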

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  HeapWord* const _finger;
  MarkFromRootsClosure* const
                  _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    CMSMarkStack* revisitStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
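
// The finger discipline, schematically (an illustrative sketch of the idea
// behind do_oop_work): an object discovered behind the scan finger must be
// pushed on the mark stack, since the linear bitmap scan has already passed
// its address; at or beyond the finger, setting the mark bit suffices,
// because the scan will still reach the object:
//
//   HeapWord* addr = (HeapWord*)obj;
//   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
//     _bitMap->mark(addr);               // now grey
//     if (addr < _finger) {
//       if (!_markStack->push(obj)) {    // push failed: stack overflow
//         handle_stack_overflow(addr);
//       }
//     }
//   }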

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion        _whole_span;
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;
  OopTaskQueue*    _work_queue;
  CMSMarkStack*    _overflow_stack;
  HeapWord*  const _finger;
  HeapWord** const _global_finger_addr;
  Par_MarkFromRootsClosure* const
                   _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        CMSMarkStack* revisit_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public KlassRememberingOopClosure {
 private:
  const MemRegion _span;
  CMSMarkStack* _mark_stack;
  CMSBitMap*    _bit_map;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSMarkStack* revisit_stack, bool cpc);
  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
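
// Typical use, schematically (an illustrative sketch; rp, is_alive and
// complete_gc are stand-ins for the closures set up by the CMS
// reference-processing code): the closure is handed to the
// ReferenceProcessor as its keep_alive closure, which applies it to each
// referent that must stay live, marking the referent and pushing it on
// _mark_stack for transitive scanning:
//
//   CMSKeepAliveClosure keep_alive(collector, span, bit_map, mark_stack,
//                                  revisit_stack, /* cpc */ true);
//   rp->process_discovered_references(&is_alive, &keep_alive,
//                                     &complete_gc, NULL);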

// Marks live objects in the shared CMS bit map and pushes them on the
// shared work queue; used, among other places, as the inner mark-and-push
// closure of CMSParKeepAliveClosure below.
class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                CMSMarkStack* revisit_stack,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};

// A parallel (MT) version of CMSKeepAliveClosure above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};