1 /*
   2  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
  27 
  28 #include "memory/genOopClosures.hpp"
  29 
  30 /////////////////////////////////////////////////////////////////
  31 // Closures used by ConcurrentMarkSweepGeneration's collector
  32 /////////////////////////////////////////////////////////////////
  33 class ConcurrentMarkSweepGeneration;
  34 class CMSBitMap;
  35 class CMSMarkStack;
  36 class CMSCollector;
  37 class MarkFromRootsClosure;
  38 class Par_MarkFromRootsClosure;
  39 
// Declares, inside the enclosing closure class, a non-virtual
// do_oop(oop) plus the template helper do_oop_work(T*), where T is
// oop or narrowOop.  The helper loads the (possibly compressed) heap
// oop at p and, if it is non-NULL, decodes it and hands the resulting
// oop to do_oop.  The virtual do_oop(oop*)/do_oop(narrowOop*)
// overrides funnel through this helper.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj);                                   \
  template <class T> inline void do_oop_work(T* p) {      \
    T heap_oop = oopDesc::load_heap_oop(p);               \
    if (!oopDesc::is_null(heap_oop)) {                    \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);       \
      do_oop(obj);                                        \
    }                                                     \
  }
  50 
// Applied to reference locations to record the referents in _bitMap.
// The do_oop bodies are defined out of line; presumably only oops
// falling within _span are marked -- confirm in the .cpp definitions.
class MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;     // region of interest, fixed at construction
  CMSBitMap*      _bitMap;   // marking bit map updated by do_oop
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);

  // Hint to iteration code: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
  66 
// Parallel (multi-threaded) counterpart of MarkRefsIntoClosure:
// same fields and construction, with do_oop bodies (defined out of
// line) suitable for execution by multiple GC worker threads.
class Par_MarkRefsIntoClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;     // region of interest, fixed at construction
  CMSBitMap*      _bitMap;   // (shared) marking bit map updated by do_oop
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Ask iterators to process the object header as well.
  bool do_header() { return true; }
  // Hint to iteration code: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
  82 
// A variant of the above used in certain kinds of CMS
// marking verification: marks into a separate verification bit map
// (_verification_bm) which can then be compared against the
// collector's own bit map (_cms_bm).  do_oop bodies are out of line.
class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
 private:
  const MemRegion _span;             // region of interest
  CMSBitMap*      _verification_bm;  // bit map being populated for verification
  CMSBitMap*      _cms_bm;           // the collector's bit map, checked against
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
  // Ask iterators to process the object header as well.
  bool do_header() { return true; }
  // Hint to iteration code: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
};
 104 
// KlassRememberingOopClosure is used when marking of the permanent generation
// is being done.  It adds fields to support revisiting of klasses
// for class unloading.  _should_remember_klasses should be set to
// indicate if klasses should be remembered.  Currently that is whenever
// CMS class unloading is turned on.  The _revisit_stack is used
// to save the klasses for later processing.
class KlassRememberingOopClosure : public OopClosure {
 protected:
  CMSCollector* _collector;       // the owning CMS collector
  CMSMarkStack* _revisit_stack;   // klasses saved here for later revisiting
  bool const    _should_remember_klasses;  // fixed at construction time
 public:
  // Sanity check; expands to nothing in product builds (PRODUCT_RETURN).
  void check_remember_klasses() const PRODUCT_RETURN;
  // NOTE(review): the "const" on this by-value bool return is redundant,
  // but the signature must stay in sync with the base-class declaration
  // (outside this file) -- do not change it here in isolation.
  virtual const bool should_remember_klasses() const {
    check_remember_klasses();
    return _should_remember_klasses;
  }
  // Record klass k on the revisit stack; defined out of line.
  virtual void remember_klass(Klass* k);

  KlassRememberingOopClosure(CMSCollector* collector,
                             ReferenceProcessor* rp,
                             CMSMarkStack* revisit_stack);
};
 128 
// Similar to KlassRememberingOopClosure for use when multiple
// GC threads will execute the closure.

class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
 public:
  Par_KlassRememberingOopClosure(CMSCollector* collector,
                                 ReferenceProcessor* rp,
                                 CMSMarkStack* revisit_stack):
    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
  // Overridden out of line; per the class comment it is expected to
  // record klasses in a manner safe for multiple worker threads.
  virtual void remember_klass(Klass* k);
};
 140 
// The non-parallel version (the parallel version appears further below).
// Embedded in MarkRefsIntoAndScanClosure and applied to object fields
// during scanning; do_oop bodies are out of line and presumably mark
// the referent in _bit_map and push it on _mark_stack for later
// scanning -- confirm in the .cpp definitions.
class PushAndMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion     _span;                    // region being collected
  CMSBitMap*    _bit_map;                 // marking bit map
  CMSBitMap*    _mod_union_table;         // mod-union table
  CMSMarkStack* _mark_stack;              // stack of grey objects
  bool          _concurrent_precleaning;  // whether used during concurrent precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     CMSMarkStack* revisit_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
  // Ask iterators to process the object header as well.
  bool do_header() { return true; }
  // Hint to iteration code: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;   // MDO revisiting currently disabled
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};
 175 
// In the parallel case, the revisit stack, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;        // region being collected
  CMSBitMap*    _bit_map;     // (shared) marking bit map
  OopTaskQueue* _work_queue;  // this worker's queue of grey objects
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
                         CMSMarkStack* revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
  // Ask iterators to process the object header as well.
  bool do_header() { return true; }
  // Hint to iteration code: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;   // MDO revisiting currently disabled
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);
};
 211 
// The non-parallel version (the parallel version appears further below).
// Combines marking with transitive scanning: it embeds a
// PushAndMarkClosure (_pushAndMarkClosure) to which the
// klass-remembering protocol is delegated, and supports cooperative
// yielding (do_yield_check/do_yield_work) while holding _freelistLock.
class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion          _span;               // region being collected
  CMSBitMap*         _bit_map;            // marking bit map
  CMSMarkStack*      _mark_stack;         // stack of grey objects
  PushAndMarkClosure _pushAndMarkClosure; // embedded field closure
  CMSCollector*      _collector;          // the owning CMS collector
  Mutex*             _freelistLock;       // settable via set_freelistLock()
  bool               _yield;              // whether to yield cooperatively
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSMarkStack* revisit_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  // Ask iterators to process the object header as well.
  bool do_header() { return true; }
  // Hint to iteration code: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }
  // Klass remembering is delegated to the embedded PushAndMarkClosure.
  virtual const bool should_remember_klasses() const {
    return _pushAndMarkClosure.should_remember_klasses();
  }
  virtual void remember_klass(Klass* k) {
    _pushAndMarkClosure.remember_klass(k);
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
 259 
// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// workstealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
 private:
  MemRegion              _span;            // region being collected
  CMSBitMap*             _bit_map;         // (shared) marking bit map
  OopTaskQueue*          _work_queue;      // this worker's queue of grey objects
  const uint             _low_water_mark;  // threshold used by trim_queue
  Par_PushAndMarkClosure _par_pushAndMarkClosure;  // embedded field closure
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
                                 CMSMarkStack*  revisit_stack);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  // Ask iterators to process the object header as well.
  bool do_header() { return true; }
  // When ScanMarkedObjectsAgainClosure is used,
  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
  // and this delegation is used.
  virtual const bool should_remember_klasses() const {
    return _par_pushAndMarkClosure.should_remember_klasses();
  }
  // See comment on should_remember_klasses() above.
  virtual void remember_klass(Klass* k) {
    _par_pushAndMarkClosure.remember_klass(k);
  }
  // Hint to iteration code: prefetch for reading while scanning.
  Prefetch::style prefetch_style() {
    return Prefetch::do_read;
  }
  // Drain this worker's queue down to (roughly) "size" entries;
  // defined out of line.
  void trim_queue(uint size);
};
 300 
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public KlassRememberingOopClosure {
 private:
  MemRegion       _span;        // region being collected
  CMSBitMap*      _bitMap;      // marking bit map
  CMSMarkStack*   _markStack;   // stack of grey objects
  HeapWord* const _finger;      // marking finger, fixed at construction
  MarkFromRootsClosure* const
                  _parent;      // enclosing closure, notified on overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    CMSMarkStack* revisitStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;   // MDO revisiting currently disabled
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
 338 
// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion        _whole_span;     // the full span; _span below is this
                                    // worker's local chunk of it
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;        // (shared) marking bit map
  OopTaskQueue*    _work_queue;     // this worker's queue of grey objects
  CMSMarkStack*    _overflow_stack; // shared stack for queue overflow
  HeapWord*  const _finger;         // this worker's marking finger
  HeapWord** const _global_finger_addr;  // address of the shared global finger
  Par_MarkFromRootsClosure* const
                   _parent;         // enclosing closure, notified on overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        CMSMarkStack* revisit_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
  // In support of class unloading
  virtual const bool should_remember_mdo() const {
    return false;   // MDO revisiting currently disabled
    // return _should_remember_klasses;
  }
  virtual void remember_mdo(DataLayout* v);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
 382 
// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public KlassRememberingOopClosure {
 private:
  const MemRegion _span;          // region of interest, fixed at construction
  CMSMarkStack* _mark_stack;      // stack of grey objects
  CMSBitMap*    _bit_map;         // marking bit map
  bool          _concurrent_precleaning;  // whether used during concurrent precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSMarkStack* revisit_stack, bool cpc);
  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
 407 
// Marks and pushes objects for parallel keep-alive processing; embedded
// as the _mark_and_push member of CMSParKeepAliveClosure below.  The
// do_oop bodies are defined out of line.
class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;        // region of interest
  OopTaskQueue* _work_queue;  // this worker's queue of grey objects
  CMSBitMap*    _bit_map;     // (shared) marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                CMSMarkStack* revisit_stack,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};
 425 
// A parallel (MT) version of the above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
 private:
  MemRegion     _span;        // region of interest
  OopTaskQueue* _work_queue;  // this worker's queue of grey objects
  CMSBitMap*    _bit_map;     // (shared) marking bit map
  CMSInnerParMarkAndPushClosure
                _mark_and_push;    // embedded closure doing the mark-and-push
  const uint    _low_water_mark;   // threshold used by trim_queue
  // Drain this worker's queue down to (roughly) "max" entries;
  // defined out of line.
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual ("nv") variants used by specialized oop iteration.
  inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
};
 449 
 450 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP