src/share/vm/gc_implementation/parNew/parNewGeneration.hpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>

--- old/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
  27 
  28 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  29 #include "memory/defNewGeneration.hpp"
  30 #include "utilities/taskqueue.hpp"
  31 
  32 class ChunkArray;
  33 class ParScanWithoutBarrierClosure;
  34 class ParScanWithBarrierClosure;
  35 class ParRootScanWithoutBarrierClosure;
  36 class ParRootScanWithBarrierTwoGensClosure;
  37 class ParEvacuateFollowersClosure;
  38 
  39 // It would be better if these types could be kept local to the .cpp file,
  40 // but they must be here to allow ParScanClosure::do_oop_work to be defined
  41 // in genOopClosures.inline.hpp.
  42 
  43 typedef Padded<OopTaskQueue> ObjToScanQueue;
  44 typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
  45 
  46 class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
  47  private:
  48   ParScanWeakRefClosure* _par_cl;


  88   ParNewGeneration* young_gen() const { return _young_gen; }
  89 
  90   Generation* _old_gen;
  91   Generation* old_gen() { return _old_gen; }
  92 
  93   HeapWord *_young_old_boundary;
  94 
  95   int _hash_seed;
  96   int _thread_num;
  97   ageTable _ageTable;
  98 
  99   bool _to_space_full;
 100 
 101 #if TASKQUEUE_STATS
 102   size_t _term_attempts;
 103   size_t _overflow_refills;
 104   size_t _overflow_refill_objs;
 105 #endif // TASKQUEUE_STATS
 106 
 107   // Stats for promotion failure
 108   size_t _promotion_failure_size;
 109 
 110   // Timing numbers.
 111   double _start;
 112   double _start_strong_roots;
 113   double _strong_roots_time;
 114   double _start_term;
 115   double _term_time;
 116 
 117   // Helper for trim_queues. Scans subset of an array and makes
 118   // remainder available for work stealing.
 119   void scan_partial_array_and_push_remainder(oop obj);
 120 
 121   // In support of CMS' parallel rescan of survivor space.
 122   ChunkArray* _survivor_chunk_array;
 123   ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
 124 
 125   void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
 126 
 127   ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
 128                      Generation* old_gen_, int thread_num_,


 163 
 164   // Allocate a to-space block of size "sz", or else return NULL.
 165   HeapWord* alloc_in_to_space_slow(size_t word_sz);
 166 
 167   HeapWord* alloc_in_to_space(size_t word_sz) {
 168     HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
 169     if (obj != NULL) return obj;
 170     else return alloc_in_to_space_slow(word_sz);
 171   }
 172 
 173   HeapWord* young_old_boundary() { return _young_old_boundary; }
 174 
 175   void set_young_old_boundary(HeapWord *boundary) {
 176     _young_old_boundary = boundary;
 177   }
 178 
 179   // Undo the most recent allocation ("obj", of "word_sz").
 180   void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
 181 
 182   // Promotion failure stats
 183   size_t promotion_failure_size() { return _promotion_failure_size; }
 184   void log_promotion_failure(size_t sz) {
 185     if (_promotion_failure_size == 0) {
 186       _promotion_failure_size = sz;
 187     }
 188   }
 189   void print_and_clear_promotion_failure_size();
 190 
 191 #if TASKQUEUE_STATS
 192   TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
 193 
 194   size_t term_attempts() const             { return _term_attempts; }
 195   size_t overflow_refills() const          { return _overflow_refills; }
 196   size_t overflow_refill_objs() const      { return _overflow_refill_objs; }
 197 
 198   void note_term_attempt()                 { ++_term_attempts; }
 199   void note_overflow_refill(size_t objs)   {
 200     ++_overflow_refills; _overflow_refill_objs += objs;
 201   }
 202 
 203   void reset_stats();
 204 #endif // TASKQUEUE_STATS
 205 
 206   void start_strong_roots() {
 207     _start_strong_roots = os::elapsedTime();
 208   }
 209   void end_strong_roots() {


 320   // A list of from-space images of to-be-scanned objects, threaded through
 321   // klass-pointers (klass information already copied to the forwarded
 322   // image.)  Manipulated with CAS.
 323   oop _overflow_list;
 324   NOT_PRODUCT(ssize_t _num_par_pushes;)
 325 
 326   // If true, older generation does not support promotion undo, so avoid.
 327   static bool _avoid_promotion_undo;
 328 
 329   // This closure is used by the reference processor to filter out
 330   // references to live referents.
 331   DefNewGeneration::IsAliveClosure _is_alive_closure;
 332 
 333   static oop real_forwardee_slow(oop obj);
 334   static void waste_some_time();
 335 
 336   // Preserve the mark of "obj", if necessary, in preparation for its mark
 337   // word being overwritten with a self-forwarding-pointer.
 338   void preserve_mark_if_necessary(oop obj, markOop m);
 339 
 340  protected:
 341 
 342   bool _survivor_overflow;
 343 
 344   bool avoid_promotion_undo() { return _avoid_promotion_undo; }
 345   void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
 346 
 347   bool survivor_overflow() { return _survivor_overflow; }
 348   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 349 
 350  public:
 351   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
 352 
 353   ~ParNewGeneration() {
 354     for (uint i = 0; i < ParallelGCThreads; i++)
 355         delete _task_queues->queue(i);
 356 
 357     delete _task_queues;
 358   }
 359 

+++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp

   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
  27 
  28 #include "gc_implementation/shared/gcTrace.hpp"
  29 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  30 #include "gc_implementation/shared/copyFailedInfo.hpp"
  31 #include "memory/defNewGeneration.hpp"
  32 #include "utilities/taskqueue.hpp"
  33 
  34 class ChunkArray;
  35 class ParScanWithoutBarrierClosure;
  36 class ParScanWithBarrierClosure;
  37 class ParRootScanWithoutBarrierClosure;
  38 class ParRootScanWithBarrierTwoGensClosure;
  39 class ParEvacuateFollowersClosure;
  40 
  41 // It would be better if these types could be kept local to the .cpp file,
  42 // but they must be here to allow ParScanClosure::do_oop_work to be defined
  43 // in genOopClosures.inline.hpp.
  44 
  45 typedef Padded<OopTaskQueue> ObjToScanQueue;
  46 typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
  47 
  48 class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
  49  private:
  50   ParScanWeakRefClosure* _par_cl;


  90   ParNewGeneration* young_gen() const { return _young_gen; }
  91 
  92   Generation* _old_gen;
  93   Generation* old_gen() { return _old_gen; }
  94 
  95   HeapWord *_young_old_boundary;
  96 
  97   int _hash_seed;
  98   int _thread_num;
  99   ageTable _ageTable;
 100 
 101   bool _to_space_full;
 102 
 103 #if TASKQUEUE_STATS
 104   size_t _term_attempts;
 105   size_t _overflow_refills;
 106   size_t _overflow_refill_objs;
 107 #endif // TASKQUEUE_STATS
 108 
 109   // Stats for promotion failure
 110   PromotionFailedInfo _promotion_failed_info;
 111 
 112   // Timing numbers.
 113   double _start;
 114   double _start_strong_roots;
 115   double _strong_roots_time;
 116   double _start_term;
 117   double _term_time;
 118 
 119   // Helper for trim_queues. Scans subset of an array and makes
 120   // remainder available for work stealing.
 121   void scan_partial_array_and_push_remainder(oop obj);
 122 
 123   // In support of CMS' parallel rescan of survivor space.
 124   ChunkArray* _survivor_chunk_array;
 125   ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
 126 
 127   void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
 128 
 129   ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
 130                      Generation* old_gen_, int thread_num_,


 165 
 166   // Allocate a to-space block of size "sz", or else return NULL.
 167   HeapWord* alloc_in_to_space_slow(size_t word_sz);
 168 
 169   HeapWord* alloc_in_to_space(size_t word_sz) {
 170     HeapWord* obj = to_space_alloc_buffer()->allocate(word_sz);
 171     if (obj != NULL) return obj;
 172     else return alloc_in_to_space_slow(word_sz);
 173   }
 174 
 175   HeapWord* young_old_boundary() { return _young_old_boundary; }
 176 
 177   void set_young_old_boundary(HeapWord *boundary) {
 178     _young_old_boundary = boundary;
 179   }
 180 
 181   // Undo the most recent allocation ("obj", of "word_sz").
 182   void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
 183 
 184   // Promotion failure stats
 185   void register_promotion_failure(size_t sz) {
 186     _promotion_failed_info.register_copy_failure(sz);
 187   }
 188   PromotionFailedInfo& promotion_failed_info() {
 189     return _promotion_failed_info;
 190   }
 191   bool promotion_failed() {
 192     return _promotion_failed_info.has_failed();
 193   }
 194   void print_promotion_failure_size();
 195 
 196 #if TASKQUEUE_STATS
 197   TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
 198 
 199   size_t term_attempts() const             { return _term_attempts; }
 200   size_t overflow_refills() const          { return _overflow_refills; }
 201   size_t overflow_refill_objs() const      { return _overflow_refill_objs; }
 202 
 203   void note_term_attempt()                 { ++_term_attempts; }
 204   void note_overflow_refill(size_t objs)   {
 205     ++_overflow_refills; _overflow_refill_objs += objs;
 206   }
 207 
 208   void reset_stats();
 209 #endif // TASKQUEUE_STATS
 210 
 211   void start_strong_roots() {
 212     _start_strong_roots = os::elapsedTime();
 213   }
 214   void end_strong_roots() {


 325   // A list of from-space images of to-be-scanned objects, threaded through
 326   // klass-pointers (klass information already copied to the forwarded
 327   // image.)  Manipulated with CAS.
 328   oop _overflow_list;
 329   NOT_PRODUCT(ssize_t _num_par_pushes;)
 330 
 331   // If true, older generation does not support promotion undo, so avoid.
 332   static bool _avoid_promotion_undo;
 333 
 334   // This closure is used by the reference processor to filter out
 335   // references to live referents.
 336   DefNewGeneration::IsAliveClosure _is_alive_closure;
 337 
 338   static oop real_forwardee_slow(oop obj);
 339   static void waste_some_time();
 340 
 341   // Preserve the mark of "obj", if necessary, in preparation for its mark
 342   // word being overwritten with a self-forwarding-pointer.
 343   void preserve_mark_if_necessary(oop obj, markOop m);
 344 
 345   void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
 346 
 347  protected:
 348 
 349   bool _survivor_overflow;
 350 
 351   bool avoid_promotion_undo() { return _avoid_promotion_undo; }
 352   void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
 353 
 354   bool survivor_overflow() { return _survivor_overflow; }
 355   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 356 
 357  public:
 358   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level);
 359 
 360   ~ParNewGeneration() {
 361     for (uint i = 0; i < ParallelGCThreads; i++)
 362         delete _task_queues->queue(i);
 363 
 364     delete _task_queues;
 365   }
 366
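
For orientation, the functional core of this change is the switch from a single size_t _promotion_failure_size (which recorded only the first failed promotion) to a per-thread PromotionFailedInfo that is fed through register_promotion_failure() and later queried with promotion_failed(), so that promotion-failure information can reach the new handle_promotion_failed(..., ParNewTracer&) path added for JEP 167. The sketch below is a minimal, self-contained model of that bookkeeping pattern, not HotSpot code: SimpleCopyFailedInfo, its fields, and main() are illustrative assumptions; the real PromotionFailedInfo presumably comes from the newly included gc_implementation/shared/copyFailedInfo.hpp, whose contents are not shown in this webrev. Only the register_copy_failure()/has_failed() calls mirror what the diff itself uses.

// Simplified, stand-alone model of the per-thread promotion-failure bookkeeping.
// SimpleCopyFailedInfo is a hypothetical stand-in for PromotionFailedInfo.
#include <cstddef>
#include <cstdio>

class SimpleCopyFailedInfo {
  size_t _first_size;   // size of the first object that failed to copy
  size_t _total_size;   // accumulated size of all failed copies
  size_t _count;        // number of failed copies
 public:
  SimpleCopyFailedInfo() : _first_size(0), _total_size(0), _count(0) {}

  // Analogous to ParScanThreadState::register_promotion_failure(sz), which
  // forwards to _promotion_failed_info.register_copy_failure(sz) in the diff.
  void register_copy_failure(size_t word_sz) {
    if (_count == 0) {
      _first_size = word_sz;   // the old header recorded only this first size
    }
    _total_size += word_sz;
    _count++;
  }

  bool has_failed() const { return _count > 0; }

  size_t first_size() const { return _first_size; }
  size_t total_size() const { return _total_size; }
  size_t count()      const { return _count; }
};

int main() {
  SimpleCopyFailedInfo info;       // per-thread, like _promotion_failed_info
  info.register_copy_failure(16);  // first failed promotion: 16 words
  info.register_copy_failure(128); // later failures are also accumulated
  if (info.has_failed()) {         // what promotion_failed() now checks
    std::printf("promotion failed: %zu objects, %zu words (first %zu)\n",
                info.count(), info.total_size(), info.first_size());
  }
  return 0;
}

The design point this illustrates: aggregating failures per scan-thread behind a small register/query interface keeps the promotion hot path cheap while giving event-based tracing richer data (count and sizes) than the single first-failure size the old header kept.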