/*
 * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP

#include "gc/shared/genOopClosures.hpp"
#include "memory/iterator.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN \
  void do_oop(oop obj);                                   \
  template <class T> inline void do_oop_work(T* p) {      \
    T heap_oop = oopDesc::load_heap_oop(p);               \
    if (!oopDesc::is_null(heap_oop)) {                    \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);       \
      do_oop(obj);                                        \
    }                                                     \
  }
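
// Each closure below mixes in DO_OOP_WORK_DEFN and forwards its virtual
// do_oop overloads to the generated do_oop_work template. An illustrative
// sketch of that forwarding pattern (the real definitions live in the CMS
// implementation files, not in this header):
//
//   void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
//   void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }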

// TODO: This duplication of the MetadataAwareOopClosure class is only needed
//       because some CMS OopClosures derive from OopsInGenClosure. It would be
//       good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
  KlassToOopClosure _klass_closure;
 public:
  MetadataAwareOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
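
// Metadata is handled by walking from a Klass to its ClassLoaderData and
// letting the CLD apply this closure (via the embedded _klass_closure) to
// the oops it holds. A rough, illustrative sketch of the intent (the actual
// definitions live in the corresponding .inline.hpp/.cpp files):
//
//   void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
//     do_class_loader_data(k->class_loader_data());
//   }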

class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
};

// In the parallel case, the bit map and the
// reference processor are currently all shared. Access to
// these shared mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is here replaced with
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
};
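
// The parallel closure marks the shared bit map with an atomic (CAS-based)
// par_mark and hands newly marked objects to its per-thread OopTaskQueue;
// if the queue is full, the object spills to a shared overflow list. A rough
// sketch of that flow (asserts and overflow-simulation logic omitted; see
// the CMS implementation files for the real definition):
//
//   void Par_PushAndMarkClosure::do_oop(oop obj) {
//     HeapWord* addr = (HeapWord*)obj;
//     if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
//       if (_bit_map->par_mark(addr)) {            // atomic mark; one thread wins
//         if (!_work_queue->push(obj)) {           // local queue full
//           _collector->par_push_on_overflow_list(obj);
//         }
//       }
//     }
//   }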

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;
  OopTaskQueue*          _work_queue;
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

  void trim_queue(uint size);
};
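
// trim_queue(size) drains the local work queue down to the given size,
// scanning each popped grey object with the embedded Par_PushAndMarkClosure
// so its references are marked and pushed in turn. A rough sketch (asserts
// omitted; the real definition is in the CMS implementation files):
//
//   void Par_MarkRefsIntoAndScanClosure::trim_queue(uint size) {
//     while (_work_queue->size() > size) {
//       oop new_oop;
//       if (_work_queue->pop_local(new_oop)) {
//         new_oop->oop_iterate(&_par_pushAndMarkClosure);
//       }
//     }
//   }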

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  HeapWord* const _finger;
  MarkFromRootsClosure* const
                  _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
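
// "Push or mark": an unmarked object in _span is first marked grey in the
// bit map; if it lies below the current _finger (i.e. the bit-map sweep has
// already passed its address), it is additionally pushed on the mark stack
// so its contents still get scanned. A rough sketch (overflow-simulation and
// asserts elided; see the CMS implementation files for the real definition):
//
//   void PushOrMarkClosure::do_oop(oop obj) {
//     HeapWord* addr = (HeapWord*)obj;
//     if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
//       _bitMap->mark(addr);                       // now grey
//       if (addr < _finger) {                      // sweep has already gone past it
//         if (!_markStack->push(obj)) {
//           handle_stack_overflow(addr);           // lose work, arrange to redo it
//         }
//       }
//       do_yield_check();
//     }
//   }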

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*    _collector;
  MemRegion        _whole_span;
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;
  OopTaskQueue*    _work_queue;
  CMSMarkStack*    _overflow_stack;
  HeapWord*  const _finger;
  HeapWord** const _global_finger_addr;
  Par_MarkFromRootsClosure* const
                   _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  const MemRegion _span;
  CMSMarkStack* _mark_stack;
  CMSBitMap*    _bit_map;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
};
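
// During reference processing, the reference processor pairs a keep-alive
// closure like this with an is-alive predicate and a drain ("complete_gc")
// closure. Illustrative usage only; the argument list is abridged and the
// surrounding variables are assumed to exist in the caller:
//
//   CMSKeepAliveClosure keep_alive(collector, span, bit_map, mark_stack,
//                                  false /* not precleaning */);
//   rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc, ...);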

class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
};
// A parallel (MT) version of CMSKeepAliveClosure above, used when
// reference processing is parallel; the only difference
// is in the do_oop method.
class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  MemRegion     _span;
  OopTaskQueue* _work_queue;
  CMSBitMap*    _bit_map;
  CMSInnerParMarkAndPushClosure
                _mark_and_push;
  const uint    _low_water_mark;
  void trim_queue(uint max);
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

#endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP