/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP

#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/padded.hpp"
#include "utilities/taskqueue.hpp"

class ChunkArray;
class ParScanWithoutBarrierClosure;
class ParScanWithBarrierClosure;
class ParRootScanWithoutBarrierClosure;
class ParRootScanWithBarrierTwoGensClosure;
class ParEvacuateFollowersClosure;

// It would be better if these types could be kept local to the .cpp file,
// but they must be here to allow ParScanClosure::do_oop_work to be defined
// in genOopClosures.inline.hpp.

typedef Padded<OopTaskQueue> ObjToScanQueue;
typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;

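// Each GC worker owns one ObjToScanQueue and steals from other workers'
// queues when its own is empty. A minimal sketch of the drain/steal loop
// (worker_id, queue_set and scan_closure are hypothetical names here; the
// real loop is ParEvacuateFollowersClosure::do_void in parNewGeneration.cpp):
//
//   oop task;
//   while (state->work_queue()->pop_local(task) ||
//          queue_set->steal(worker_id, state->hash_seed(), task)) {
//     task->oop_iterate(&scan_closure); // may push more work onto the queue
//   }
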
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 private:
  ParScanWeakRefClosure* _par_cl;
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The state needed by a thread performing a parallel young-gen collection.
class ParScanThreadState {
  friend class ParScanThreadStateSet;
 private:
  ObjToScanQueue* _work_queue;
  Stack<oop, mtGC>* const _overflow_stack;

  ParGCAllocBuffer _to_space_alloc_buffer;

  ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
  ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
  // One of these two will be passed to process_roots, which will
  // set its generation.  The first is for two-gen configs where the
  // old gen collects the perm gen; the second is for arbitrary configs.
  // The second isn't used right now (it used to be used for the train, an
  // incremental collector) but the declaration has been left as a reminder.
  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
  // This closure will always be bound to the old gen; it will be used
  // in evacuate_followers.
  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
  ParEvacuateFollowersClosure          _evacuate_followers;
  DefNewGeneration::IsAliveClosure     _is_alive_closure;
  ParScanWeakRefClosure                _scan_weak_ref_closure;
  ParKeepAliveClosure                  _keep_alive_closure;

  Space* _to_space;
  Space* to_space() { return _to_space; }

  ParNewGeneration* _young_gen;
  ParNewGeneration* young_gen() const { return _young_gen; }

  Generation* _old_gen;
  Generation* old_gen() { return _old_gen; }

  HeapWord* _young_old_boundary;

  ParNewTracer* _gc_tracer;

  int _hash_seed;
  int _thread_num;
  ageTable _ageTable;

  bool _to_space_full;

#if TASKQUEUE_STATS
  size_t _term_attempts;
  size_t _overflow_refills;
  size_t _overflow_refill_objs;
#endif // TASKQUEUE_STATS

  // Stats for promotion failure
  PromotionFailedInfo _promotion_failed_info;

  // Timing numbers.
  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Helper for trim_queues. Scans a subset of an array and makes the
  // remainder available for work stealing.
  void scan_partial_array_and_push_remainder(oop obj);

  // In support of CMS' parallel rescan of survivor space.
  ChunkArray* _survivor_chunk_array;
  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }

  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);

  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                     Generation* old_gen_, int thread_num_,
                     ObjToScanQueueSet* work_queue_set_,
                     Stack<oop, mtGC>* overflow_stacks_,
                     size_t desired_plab_sz_,
                     ParNewTracer* gc_tracer,
                     ParallelTaskTerminator& term_);

 public:
  ageTable* age_table() { return &_ageTable; }

  ObjToScanQueue* work_queue() { return _work_queue; }

  ParGCAllocBuffer* to_space_alloc_buffer() {
    return &_to_space_alloc_buffer;
  }

  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }

  // Decrease queue size below "max_size".
  void trim_queues(int max_size);

  // Private overflow stack usage.
  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
  bool take_from_overflow_stack();
  void push_on_overflow_stack(oop p);
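  // (When ParGCUseLocalOverflow is enabled, a push that finds the work
  // queue full spills to this unbounded per-thread stack rather than to
  // the global overflow list.)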

  // Is new_obj a candidate for the scan_partial_array_and_push_remainder
  // method?
  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

  ParNewTracer* gc_tracer() { return _gc_tracer; }

  int* hash_seed()  { return &_hash_seed; }
  int  thread_num() { return _thread_num; }

  // Allocate a to-space block of size "word_sz", or else return NULL.
  // The oop "old" is used to extract information for the promotion trace
  // event.
  HeapWord* alloc_in_to_space_slow(size_t word_sz, const oop old, const uint age);

  HeapWord* alloc_in_to_space(size_t word_sz, const oop old, const uint age) {
    HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
    if (obj != NULL) {
      return obj;
    } else {
      return alloc_in_to_space_slow(word_sz, old, age);
    }
  }
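
  // Hedged sketch of how the copy path is expected to use the fast/slow
  // pair above (the real logic lives in copy_to_survivor_space in
  // parNewGeneration.cpp):
  //
  //   HeapWord* buf = alloc_in_to_space(word_sz, old, age);
  //   if (buf == NULL) {
  //     // Neither the PLAB nor to-space could accommodate the object;
  //     // the caller falls back to promotion into the old generation.
  //   } else {
  //     Copy::aligned_disjoint_words((HeapWord*)old, buf, word_sz);
  //   }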

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void set_young_old_boundary(HeapWord* boundary) {
    _young_old_boundary = boundary;
  }

  // Undo the most recent allocation ("obj", of "word_sz").
  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);

  // Promotion failure stats
  void register_promotion_failure(size_t sz) {
    _promotion_failed_info.register_copy_failure(sz);
  }
  PromotionFailedInfo& promotion_failed_info() {
    return _promotion_failed_info;
  }
  bool promotion_failed() {
    return _promotion_failed_info.has_failed();
  }
  void print_promotion_failure_size();

#if TASKQUEUE_STATS
  TaskQueueStats& taskqueue_stats() const { return _work_queue->stats; }

  size_t term_attempts() const             { return _term_attempts; }
  size_t overflow_refills() const          { return _overflow_refills; }
  size_t overflow_refill_objs() const      { return _overflow_refill_objs; }

  void note_term_attempt()                 { ++_term_attempts; }
  void note_overflow_refill(size_t objs)   {
    ++_overflow_refills; _overflow_refill_objs += objs;
  }

  void reset_stats();
#endif // TASKQUEUE_STATS

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }
  void start_term_time() {
    TASKQUEUE_STATS_ONLY(note_term_attempt());
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }
};

class ParNewGenTask: public AbstractGangTask {
 private:
  ParNewGeneration*            _gen;
  Generation*                  _next_gen;
  HeapWord*                    _young_old_boundary;
  class ParScanThreadStateSet* _state_set;

 public:
  ParNewGenTask(ParNewGeneration*      gen,
                Generation*            next_gen,
                HeapWord*              young_old_boundary,
                ParScanThreadStateSet* state_set);

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void work(uint worker_id);

  // Reset the terminator in ParScanThreadStateSet for
  // "active_workers" threads.
  virtual void set_for_termination(int active_workers);
};

class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  KeepAliveClosure(ScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

class EvacuateFollowersClosureGeneral: public VoidClosure {
 private:
  GenCollectedHeap* _gch;
  int               _level;
  OopsInGenClosure* _scan_cur_or_nonheap;
  OopsInGenClosure* _scan_older;
 public:
  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur,
                                  OopsInGenClosure* older);
  virtual void do_void();
};
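
// A do_void implementation of this shape is expected to apply the two
// closures repeatedly until no further work is generated; roughly (a
// sketch of the general pattern, not the verbatim implementation):
//
//   do {
//     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
//                                        _scan_older);
//   } while (!_gch->no_allocs_since_save_marks(_level));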

// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except it uses the parallel GC barrier.
class ScanClosureWithParBarrier: public ScanClosure {
 protected:
  template <class T> void do_oop_work(T* p);
 public:
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// Implements AbstractRefProcTaskExecutor for ParNew.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  ParNewGeneration&      _generation;
  ParScanThreadStateSet& _state_set;
 public:
  ParNewRefProcTaskExecutor(ParNewGeneration& generation,
                            ParScanThreadStateSet& state_set)
    : _generation(generation), _state_set(state_set)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
  // Switch to single-threaded mode.
  virtual void set_single_threaded_mode();
};
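
// Expected use during reference processing (a hedged sketch; the actual
// call site is in ParNewGeneration::collect in parNewGeneration.cpp, and
// is_alive, keep_alive, evacuate_followers and _gc_timer stand in for the
// closures and timer the caller has in scope):
//
//   ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
//   stats = rp->process_discovered_references(&is_alive, &keep_alive,
//                                             &evacuate_followers,
//                                             &task_executor, _gc_timer);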

// A Generation that does parallel young-gen collection.

class ParNewGeneration: public DefNewGeneration {
  friend class ParNewGenTask;
  friend class ParNewRefProcTask;
  friend class ParNewRefProcTaskExecutor;
  friend class ParScanThreadStateSet;
  friend class ParEvacuateFollowersClosure;

 private:
  // The per-worker-thread work queues
  ObjToScanQueueSet* _task_queues;

  // Per-worker-thread local overflow stacks
  Stack<oop, mtGC>* _overflow_stacks;

  // Desired size of survivor space PLABs
  PLABStats _plab_stats;

  // A list of from-space images of to-be-scanned objects, threaded through
  // klass-pointers (klass information already copied to the forwarded
  // image.)  Manipulated with CAS.
  oop _overflow_list;
  NOT_PRODUCT(ssize_t _num_par_pushes;)
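
  // A push is the usual lock-free list insert, roughly (a sketch; the real
  // code in push_on_overflow_list also deals with a BUSY sentinel used to
  // claim the list):
  //
  //   oop cur;
  //   do {
  //     cur = _overflow_list;
  //     from_space_obj->set_klass_to_list_ptr(cur);
  //   } while (Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur) != cur);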

  // If true, the older generation does not support promotion undo, so
  // avoid it.
  static bool _avoid_promotion_undo;

  // This closure is used by the reference processor to filter out
  // references to live referents.
  DefNewGeneration::IsAliveClosure _is_alive_closure;

  static oop real_forwardee_slow(oop obj);
  static void waste_some_time();

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

  void handle_promotion_failed(GenCollectedHeap* gch,
                               ParScanThreadStateSet& thread_state_set,
                               ParNewTracer& gc_tracer);

 protected:

  bool _survivor_overflow;

  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }

  bool survivor_overflow() { return _survivor_overflow; }
  void set_survivor_overflow(bool v) { _survivor_overflow = v; }

 public:
  ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);

  ~ParNewGeneration() {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      delete _task_queues->queue(i);
    }
    delete _task_queues;
  }

  virtual void ref_processor_init();
  virtual Generation::Name kind()        { return Generation::ParNew; }
  virtual const char* name() const;
  virtual const char* short_name() const { return "ParNew"; }

  // override
  virtual bool refs_discovery_is_mt()     const {
    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
    return ParallelGCThreads > 1;
  }

  // Make the collection virtual.
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);

  // This needs to be visible to the closure function.
  // "obj" is the object to be copied, "m" is a recent value of its mark
  // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"s mark word by a parallel thread).
  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                    oop obj, size_t obj_sz, markOop m) {
    if (_avoid_promotion_undo) {
      return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
                                                            obj, obj_sz, m);
    }

    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
  }

  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
                                                     oop obj, size_t obj_sz, markOop m);

  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
                                       oop obj, size_t obj_sz, markOop m);

  // In support of testing the overflow code.
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool should_simulate_overflow();)

  // Accessor for the overflow list.
  oop overflow_list() { return _overflow_list; }

  // Push the given (from-space) object on the global overflow list.
  void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);

  // If the global overflow list is non-empty, move some tasks from it
  // onto "work_q" (which need not be empty).  No more than 1/4 of the
  // available space on "work_q" is used.
  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
  bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);
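  // (Illustration of the 1/4 rule above: a work_q with capacity N that
  // currently holds k entries has N - k free slots, so at most
  // (N - k) / 4 objects are moved off the overflow list.)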

  // The task queues to be used by parallel GC threads.
  ObjToScanQueueSet* task_queues() {
    return _task_queues;
  }

  PLABStats* plab_stats() {
    return &_plab_stats;
  }

  size_t desired_plab_sz() {
    return _plab_stats.desired_plab_sz();
  }

  static oop real_forwardee(oop obj);

  DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP