/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_PARNEWGENERATION_HPP
#define SHARE_VM_GC_CMS_PARNEWGENERATION_HPP

#include "gc/cms/parOopClosures.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/padded.hpp"

class ChunkArray;
class CMSHeap;
class ParScanWithoutBarrierClosure;
class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
class ParRootScanWithBarrierTwoGensClosure;
class ParEvacuateFollowersClosure;
class StrongRootsScope;

// It would be better if these types could be kept local to the .cpp file,
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.

typedef Padded<OopTaskQueue> ObjToScanQueue;
typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
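
// Each GC worker thread owns one ObjToScanQueue.  The Padded wrapper rounds a
// queue up to a cache-line-aligned size, presumably so that adjacent
// per-thread queues do not share cache lines (false sharing).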

class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 private:
  ParScanWeakRefClosure* _par_cl;
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The state needed by a thread performing parallel young-gen collection.
class ParScanThreadState {
  friend class ParScanThreadStateSet;
 private:
  ObjToScanQueue* _work_queue;
  Stack<oop, mtGC>* const _overflow_stack;
  PreservedMarks* const _preserved_marks;

  PLAB _to_space_alloc_buffer;

  ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
  ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
  // Will be passed to process_roots to set its generation.
  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
  // This closure will always be bound to the old gen; it will be used
  // in evacuate_followers.
  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
  ParEvacuateFollowersClosure          _evacuate_followers;
  DefNewGeneration::IsAliveClosure     _is_alive_closure;
  ParScanWeakRefClosure                _scan_weak_ref_closure;
  ParKeepAliveClosure                  _keep_alive_closure;

  Space* _to_space;
  Space* to_space() { return _to_space; }

  ParNewGeneration* _young_gen;
  ParNewGeneration* young_gen() const { return _young_gen; }

  Generation* _old_gen;
  Generation* old_gen() { return _old_gen; }

  HeapWord* _young_old_boundary;

  int _thread_num;
  AgeTable _ageTable;

  bool _to_space_full;

#if TASKQUEUE_STATS
  size_t _term_attempts;
  size_t _overflow_refills;
  size_t _overflow_refill_objs;
#endif // TASKQUEUE_STATS

  // Stats for promotion failure
  PromotionFailedInfo _promotion_failed_info;

  // Timing numbers.
  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Helper for trim_queues.  Scans a subset of an object array and makes the
  // remainder available for work stealing.
  void scan_partial_array_and_push_remainder(oop obj);

  // In support of CMS' parallel rescan of survivor space.
  ChunkArray* _survivor_chunk_array;
  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }

  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);

  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                     Generation* old_gen_, int thread_num_,
                     ObjToScanQueueSet* work_queue_set_,
                     Stack<oop, mtGC>* overflow_stacks_,
                     PreservedMarks* preserved_marks_,
                     size_t desired_plab_sz_,
                     ParallelTaskTerminator& term_);

 public:
  AgeTable* age_table() { return &_ageTable; }

  ObjToScanQueue* work_queue() { return _work_queue; }

  PreservedMarks* preserved_marks() const { return _preserved_marks; }

  PLAB* to_space_alloc_buffer() {
    return &_to_space_alloc_buffer;
  }

  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }

  // Decrease queue size below "max_size".
  void trim_queues(int max_size);
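  // A rough sketch of what trimming does: while the queue holds more than
  // max_size entries, pop an object and scan it (pushing any newly discovered
  // work), so the local queue does not grow without bound during evacuation.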

  // Private overflow stack usage
  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
  bool take_from_overflow_stack();
  void push_on_overflow_stack(oop p);

  // Is new_obj a candidate for the scan_partial_array_and_push_remainder() method?
  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

  int  thread_num() { return _thread_num; }

  // Allocate a to-space block of size "word_sz", or else return NULL.
  HeapWord* alloc_in_to_space_slow(size_t word_sz);

  inline HeapWord* alloc_in_to_space(size_t word_sz);
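
  // The inline variant is the expected fast path: it first tries to carve the
  // block out of the thread-local to-space PLAB, and only falls back to
  // alloc_in_to_space_slow() when the buffer cannot satisfy the request.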

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void set_young_old_boundary(HeapWord* boundary) {
    _young_old_boundary = boundary;
  }

  // Undo the most recent allocation ("obj", of "word_sz").
  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);

  // Promotion failure stats
  void register_promotion_failure(size_t sz) {
    _promotion_failed_info.register_copy_failure(sz);
  }
  PromotionFailedInfo& promotion_failed_info() {
    return _promotion_failed_info;
  }
  bool promotion_failed() {
    return _promotion_failed_info.has_failed();
  }
  void print_promotion_failure_size();

#if TASKQUEUE_STATS
  TaskQueueStats& taskqueue_stats() const { return _work_queue->stats; }

  size_t term_attempts() const             { return _term_attempts; }
  size_t overflow_refills() const          { return _overflow_refills; }
  size_t overflow_refill_objs() const      { return _overflow_refill_objs; }

  void note_term_attempt()                 { ++_term_attempts; }
  void note_overflow_refill(size_t objs)   {
    ++_overflow_refills; _overflow_refill_objs += objs;
  }

  void reset_stats();
#endif // TASKQUEUE_STATS

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }
  void start_term_time() {
    TASKQUEUE_STATS_ONLY(note_term_attempt());
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }
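
  // Typical bracketing by a worker (sketch):
  //   start_strong_roots(); ...scan roots...;        end_strong_roots();
  //   start_term_time();    ...offer termination...; end_term_time();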

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }
};

class ParNewGenTask: public AbstractGangTask {
 private:
  ParNewGeneration*            _young_gen;
  Generation*                  _old_gen;
  HeapWord*                    _young_old_boundary;
  class ParScanThreadStateSet* _state_set;
  StrongRootsScope*            _strong_roots_scope;
  OopStorage::ParState<false, false> _par_state_string;

 public:
  ParNewGenTask(ParNewGeneration*      young_gen,
                Generation*            old_gen,
                HeapWord*              young_old_boundary,
                ParScanThreadStateSet* state_set,
                StrongRootsScope*      strong_roots_scope);

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void work(uint worker_id);
};

class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  KeepAliveClosure(ScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

template <typename OopClosureType1, typename OopClosureType2>
class EvacuateFollowersClosureGeneral: public VoidClosure {
 private:
  CMSHeap* _heap;
  OopClosureType1* _scan_cur_or_nonheap;
  OopClosureType2* _scan_older;
 public:
  EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                  OopClosureType1* cur,
                                  OopClosureType2* older);
  virtual void do_void();
};
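
// do_void() is expected to loop, applying the two closures to objects copied
// since the last save-marks, until an iteration discovers no new work.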

// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public OopsInClassLoaderDataOrGenClosure {
 private:
  ParNewGeneration* _g;
  HeapWord*         _boundary;
  bool              _gc_barrier;

  template <class T> void do_oop_work(T* p);

 public:
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  ParNewGeneration&      _young_gen;
  Generation&            _old_gen;
  ParScanThreadStateSet& _state_set;
 public:
  ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
                            Generation& old_gen,
                            ParScanThreadStateSet& state_set)
    : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task, uint ergo_workers);
  // Switch to single threaded mode.
  virtual void set_single_threaded_mode();
};


// A Generation that does parallel young-gen collection.

class ParNewGeneration: public DefNewGeneration {
  friend class ParNewGenTask;
  friend class ParNewRefProcTask;
  friend class ParNewRefProcTaskExecutor;
  friend class ParScanThreadStateSet;
  friend class ParEvacuateFollowersClosure;

 private:
  // The per-worker-thread work queues
  ObjToScanQueueSet* _task_queues;

  // Per-worker-thread local overflow stacks
  Stack<oop, mtGC>* _overflow_stacks;
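  // (Only allocated and used when ParGCUseLocalOverflow is enabled; otherwise
  // overflow is routed through the global _overflow_list below.)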

  // Desired size of survivor space PLABs.
  PLABStats _plab_stats;

  // A list of from-space images of to-be-scanned objects, threaded through
  // klass pointers (the klass information has already been copied to the
  // forwarded image).  Manipulated with CAS.
  oopDesc* volatile _overflow_list;
  NOT_PRODUCT(ssize_t _num_par_pushes;)
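
  // Illustrative sketch (not the actual implementation) of a CAS-based push;
  // set_klass_to_list_ptr() stands in for whatever helper the .cpp file uses
  // to thread the list through the object's klass word:
  //
  //   oopDesc* cur;
  //   do {
  //     cur = _overflow_list;
  //     from_space_obj->set_klass_to_list_ptr(cur);
  //   } while (Atomic::cmpxchg(from_space_obj, &_overflow_list, cur) != cur);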

  // This closure is used by the reference processor to filter out
  // references to live referents.
  DefNewGeneration::IsAliveClosure _is_alive_closure;

  // GC tracer that should be used during collection.
  ParNewTracer _gc_tracer;

  static oop real_forwardee_slow(oop obj);
  static void waste_some_time();

  void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);

 protected:

  void restore_preserved_marks();

 public:
  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);

  ~ParNewGeneration() {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      delete _task_queues->queue(i);
    }
    delete _task_queues;
  }

  virtual void ref_processor_init();
  virtual Generation::Name kind()        { return Generation::ParNew; }
  virtual const char* name() const;
  virtual const char* short_name() const { return "ParNew"; }

  // override
  virtual bool refs_discovery_is_mt()     const {
    return ParallelGCThreads > 1;
  }

  // Make the collection virtual.
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);

  // This needs to be visible to the closure function.
  // "obj" is the object to be copied, "m" is a recent value of its mark
  // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"'s mark word by a parallel thread).
  oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                             oop obj, size_t obj_sz, markOop m);
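
  // A caller in the scan closures does roughly the following (sketch only):
  //
  //   markOop m = obj->mark();
  //   oop new_obj = m->is_marked()                  // already forwarded?
  //       ? real_forwardee(obj)
  //       : copy_to_survivor_space(par_scan_state, obj, obj->size(), m);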

  // In support of testing the overflow code.
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool should_simulate_overflow();)

  // Accessor for the overflow list.
  oop overflow_list() { return _overflow_list; }

  // Push the given (from-space) object on the global overflow list.
  void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);

  // If the global overflow list is non-empty, move some tasks from it
  // onto "work_q" (which need not be empty).  No more than 1/4 of the
  // available space on "work_q" is used.
  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
  bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);

  // The task queues to be used by parallel GC threads.
  ObjToScanQueueSet* task_queues() {
    return _task_queues;
  }

  PLABStats* plab_stats() {
    return &_plab_stats;
  }

  size_t desired_plab_sz();

  const ParNewTracer* gc_tracer() const {
    return &_gc_tracer;
  }

  static oop real_forwardee(oop obj);
};

#endif // SHARE_VM_GC_CMS_PARNEWGENERATION_HPP