1 /*
   2  * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
  26 #define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
  27 
  28 #include "gc/shared/genOopClosures.hpp"
  29 #include "gc/shared/taskqueue.hpp"
  30 #include "memory/iterator.hpp"
  31 
  32 /////////////////////////////////////////////////////////////////
  33 // Closures used by ConcurrentMarkSweepGeneration's collector
  34 /////////////////////////////////////////////////////////////////
  35 class ConcurrentMarkSweepGeneration;
  36 class CMSBitMap;
  37 class CMSMarkStack;
  38 class CMSCollector;
  39 class MarkFromRootsClosure;
  40 class ParMarkFromRootsClosure;
  41 
  42 // Decode the oop and call do_oop on it.
// Declares the per-closure oop-processing pair used by the classes below:
// a do_oop(oop) entry point plus a templatized do_oop_work(T*) that serves
// both wide (oop*) and narrow (narrowOop*) pointer widths. The template is
// declared here and defined in the corresponding .inline.hpp/.cpp file.
// (No comments inside the macro body: '//' before a trailing backslash
// would splice the next line into the comment.)
#define DO_OOP_WORK_DEFN                             \
  void do_oop(oop obj);                              \
  template <class T> inline void do_oop_work(T* p);
  46 
  47 // TODO: This duplication of the MetadataAwareOopClosure class is only needed
  48 //       because some CMS OopClosures derive from OopsInGenClosure. It would be
  49 //       good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
  // Helper closure handed to klass iteration; initialized below to feed
  // oops found via a Klass back into this closure.
  KlassToOopClosure _klass_closure;
 public:
  MetadataAwareOopsInGenClosure() {
    // Route the klass-embedded oops back through this closure.
    _klass_closure.initialize(this);
  }

  // Metadata (klasses/CLDs) is always processed by this closure family.
  // The "_nv" members are the non-virtual variants; the virtual entry
  // points simply delegate to them (see do_metadata/do_cld below).
  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
  void do_cld_nv(ClassLoaderData* cld);
};
  66 
// Records encountered references in a CMS marking bit map, restricted to
// a given span of the heap (marking logic lives in the .cpp; this header
// only declares the interface).
class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;     // heap region this closure is restricted to
  CMSBitMap*      _bitMap;   // bit map the references are recorded in
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
  78 
// Parallel (MT) counterpart of MarkRefsIntoClosure: same interface and
// fields; the implementation in the .cpp is safe for concurrent use of
// the shared bit map.
class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;     // heap region this closure is restricted to
  CMSBitMap*      _bitMap;   // shared marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
  90 
  91 // A variant of the above used in certain kinds of CMS
  92 // marking verification.
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;              // heap region under verification
  CMSBitMap*      _verification_bm;   // bit map being built for verification
  CMSBitMap*      _cms_bm;            // the collector's own marking bit map,
                                      // presumably compared against — see .cpp
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
 106 
 107 // The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;              // owning collector
  MemRegion     _span;                   // heap region of interest
  CMSBitMap*    _bit_map;                // marking bit map
  CMSBitMap*    _mod_union_table;        // mod-union table (card-level dirty info)
  CMSMarkStack* _mark_stack;             // stack of grey objects (serial case)
  bool          _concurrent_precleaning; // true when used during concurrent precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};
 131 
 132 // In the parallel case, the bit map and the
 133 // reference processor are currently all shared. Access to
 134 // these shared mutable structures must use appropriate
 135 // synchronization (for instance, via CAS). The marking stack
 136 // used in the non-parallel case above is here replaced with
 137 // an OopTaskQueue structure to allow efficient work stealing.
class ParPushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;   // owning collector
  MemRegion     _span;        // heap region of interest
  CMSBitMap*    _bit_map;     // shared marking bit map (see synchronization note above)
  OopTaskQueue* _work_queue;  // per-worker queue of grey objects; supports stealing
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParPushAndMarkClosure(CMSCollector* collector,
                        MemRegion span,
                        ReferenceProcessor* rp,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};
 157 
 158 // The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion          _span;               // heap region of interest
  CMSBitMap*         _bit_map;            // marking bit map
  CMSMarkStack*      _mark_stack;         // stack of grey objects (serial case)
  PushAndMarkClosure _pushAndMarkClosure; // inner closure used for the transitive scan
  CMSCollector*      _collector;          // owning collector
  Mutex*             _freelistLock;       // settable lock, see set_freelistLock()
  bool               _yield;              // whether the closure should yield (see do_yield_check)
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Installs the free-list lock used during yields (set by the caller
  // before iteration; see do_yield_work in the .cpp).
  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();         // cheap check; calls do_yield_work if needed
  void do_yield_work();                 // actual yield protocol (defined in .cpp)
  bool take_from_overflow_list();       // refill from the global overflow list
};
 195 
 196 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
 197 // stack and the bitMap are shared, so access needs to be suitably
 198 // synchronized. An OopTaskQueue structure, supporting efficient
 199 // work stealing, replaces a CMSMarkStack for storing grey objects.
class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion             _span;                  // heap region of interest
  CMSBitMap*            _bit_map;               // shared marking bit map
  OopTaskQueue*         _work_queue;            // per-worker grey-object queue
  const uint            _low_water_mark;        // queue threshold, used by trim_queue
  ParPushAndMarkClosure _parPushAndMarkClosure; // inner closure for the transitive scan
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Drains the work queue down to (about) "size" entries; see .cpp.
  void trim_queue(uint size);
};
 222 
 223 // This closure is used during the concurrent marking phase
 224 // following the first checkpoint. Its use is buried in
 225 // the closure MarkFromRootsClosure.
class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;  // owning collector
  MemRegion       _span;       // heap region of interest
  CMSBitMap*      _bitMap;     // marking bit map
  CMSMarkStack*   _markStack;  // stack of grey objects
  HeapWord* const _finger;     // marking finger at the time this closure was created
  MarkFromRootsClosure* const
                  _parent;     // enclosing closure, notified on stack overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
 254 
 255 // A parallel (MT) version of the above.
 256 // This closure is used during the concurrent marking phase
 257 // following the first checkpoint. Its use is buried in
 258 // the closure ParMarkFromRootsClosure.
class ParPushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*    _collector;          // owning collector
  MemRegion        _whole_span;         // the entire span being marked (all workers)
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;            // shared marking bit map
  OopTaskQueue*    _work_queue;         // this worker's grey-object queue
  CMSMarkStack*    _overflow_stack;     // shared overflow stack for queue spill-over
  HeapWord*  const _finger;             // this worker's marking finger
  HeapWord** const _global_finger_addr; // address of the global (shared) finger
  ParMarkFromRootsClosure* const
                   _parent;             // enclosing closure, notified on stack overflow
 protected:
  DO_OOP_WORK_DEFN
 public:
  ParPushOrMarkClosure(CMSCollector* cms_collector,
                       MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack* mark_stack,
                       HeapWord* finger,
                       HeapWord** global_finger_addr,
                       ParMarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
 292 
 293 // For objects in CMS generation, this closure marks
 294 // given objects (transitively) as being reachable/live.
 295 // This is currently used during the (weak) reference object
 296 // processing phase of the CMS final checkpoint step, as
 297 // well as during the concurrent precleaning of the discovered
 298 // reference lists.
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;              // owning collector
  const MemRegion _span;                 // heap region of interest
  CMSMarkStack* _mark_stack;             // stack of grey objects
  CMSBitMap*    _bit_map;                // marking bit map
  bool          _concurrent_precleaning; // true when used during concurrent precleaning
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      bool cpc);
  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};
 318 
// Inner closure embedded in CMSParKeepAliveClosure (below) for the
// parallel reference-processing case; uses an OopTaskQueue rather than
// a CMSMarkStack.
class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;   // owning collector
  MemRegion     _span;        // heap region of interest
  OopTaskQueue* _work_queue;  // per-worker grey-object queue
  CMSBitMap*    _bit_map;     // shared marking bit map
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                MemRegion span, CMSBitMap* bit_map,
                                OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  // Non-virtual variants used by the specialized oop iteration machinery.
  inline void do_oop_nv(oop* p);
  inline void do_oop_nv(narrowOop* p);
};
 336 
 337 // A parallel (MT) version of the above, used when
 338 // reference processing is parallel; the only difference
 339 // is in the do_oop method.
class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  MemRegion     _span;        // heap region of interest
  OopTaskQueue* _work_queue;  // per-worker grey-object queue
  CMSBitMap*    _bit_map;     // shared marking bit map
  CMSInnerParMarkAndPushClosure
                _mark_and_push;        // inner closure doing the marking/pushing
  const uint    _low_water_mark;       // queue threshold, used by trim_queue
  void trim_queue(uint max);           // drain the queue down to "max" entries
 protected:
  DO_OOP_WORK_DEFN
 public:
  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
 357 
 358 #endif // SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP