src/share/vm/gc_implementation/parNew/parNewGeneration.hpp

  // This closure will always be bound to the old gen; it will be used
  // in evacuate_followers.
  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
  ParEvacuateFollowersClosure          _evacuate_followers;
  DefNewGeneration::IsAliveClosure     _is_alive_closure;
  ParScanWeakRefClosure                _scan_weak_ref_closure;
  ParKeepAliveClosure                  _keep_alive_closure;

  Space* _to_space;
  Space* to_space() { return _to_space; }

  ParNewGeneration* _young_gen;
  ParNewGeneration* young_gen() const { return _young_gen; }

  Generation* _old_gen;
  Generation* old_gen() { return _old_gen; }

  HeapWord* _young_old_boundary;

  ParNewTracer* _gc_tracer;

  int _hash_seed;
  int _thread_num;
  ageTable _ageTable;

  bool _to_space_full;

#if TASKQUEUE_STATS
  size_t _term_attempts;
  size_t _overflow_refills;
  size_t _overflow_refill_objs;
#endif // TASKQUEUE_STATS

  // Stats for promotion failure
  PromotionFailedInfo _promotion_failed_info;

  // Timing numbers.
  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Helper for trim_queues. Scans a subset of an array and makes the
  // remainder available for work stealing.
  void scan_partial_array_and_push_remainder(oop obj);

  // In support of CMS' parallel rescan of survivor space.
  ChunkArray* _survivor_chunk_array;
  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }

  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);

  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                     Generation* old_gen_, int thread_num_,
                     ObjToScanQueueSet* work_queue_set_,
                     Stack<oop, mtGC>* overflow_stacks_,
                     size_t desired_plab_sz_,
                     ParNewTracer* gc_tracer_,
                     ParallelTaskTerminator& term_);

 public:
  ageTable* age_table() { return &_ageTable; }

  ObjToScanQueue* work_queue() { return _work_queue; }

  ParGCAllocBuffer* to_space_alloc_buffer() {
    return &_to_space_alloc_buffer;
  }

  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }

  // Decrease queue size below "max_size".
  void trim_queues(int max_size);

  // Private overflow stack usage
  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
  bool take_from_overflow_stack();
  void push_on_overflow_stack(oop p);

  // Is new_obj a candidate for the scan_partial_array_and_push_remainder method?
  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;

  ParNewTracer* gc_tracer() const { return _gc_tracer; }

  int* hash_seed()  { return &_hash_seed; }
  int  thread_num() { return _thread_num; }

  // Allocate a to-space block of size "word_sz", or else return NULL.
  // The old oop and its age are used to fill in the promotion trace event.
  HeapWord* alloc_in_to_space_slow(size_t word_sz, const oop old, const uint age);

  HeapWord* alloc_in_to_space(size_t word_sz, const oop old, const uint age) {
    HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
    if (obj != NULL) {
      return obj;
    } else {
      return alloc_in_to_space_slow(word_sz, old, age);
    }
  }

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  void set_young_old_boundary(HeapWord* boundary) {
    _young_old_boundary = boundary;
  }

  // Undo the most recent allocation ("obj", of "word_sz").
  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);

  // Promotion failure stats
  void register_promotion_failure(size_t sz) {
    _promotion_failed_info.register_copy_failure(sz);
  }
  PromotionFailedInfo& promotion_failed_info() {
    return _promotion_failed_info;
  }
  bool promotion_failed() {
    return _promotion_failed_info.has_failed();
  }
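
The change threads a ParNewTracer into ParScanThreadState and widens the to-space allocation entry points with the source oop and its age, so that only the slow path (a PLAB miss) pays for building a promotion trace event. As context for review, here is a minimal, self-contained sketch of that fast/slow PLAB pattern; all names below (HeapWordT, MiniPlab, MiniScanState, and the printf standing in for the trace event) are illustrative and not part of this webrev or of HotSpot.

#include <cstddef>
#include <cstdio>

typedef unsigned long HeapWordT; // stand-in for HotSpot's HeapWord

struct MiniPlab {
  HeapWordT* _top;
  HeapWordT* _end;

  // Fast path: bump-pointer allocation inside the current buffer.
  HeapWordT* allocate(size_t word_sz) {
    if (_top + word_sz <= _end) {
      HeapWordT* obj = _top;
      _top += word_sz;
      return obj;
    }
    return NULL; // buffer exhausted; caller falls back to the slow path
  }
};

struct MiniScanState {
  MiniPlab _plab;

  // Slow-path stand-in: the real code retires the buffer, allocates a new
  // PLAB, and reports a promotion trace event built from (old, age).
  HeapWordT* alloc_slow(size_t word_sz, unsigned age) {
    std::printf("promotion event: size=%zu words, age=%u\n", word_sz, age);
    return NULL; // PLAB refill elided in this sketch
  }

  // Mirrors alloc_in_to_space: try the buffer first; the common case
  // allocates with no event, only the miss does the extra bookkeeping.
  HeapWordT* alloc(size_t word_sz, unsigned age) {
    HeapWordT* obj = _plab.allocate(word_sz);
    return (obj != NULL) ? obj : alloc_slow(word_sz, age);
  }
};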
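
Separately, scan_partial_array_and_push_remainder (declared above) is the queue-trimming trick for large object arrays: scan only a chunk now and republish the rest as a stealable task. A small illustrative sketch of that splitting idea, with std::deque standing in for HotSpot's ObjToScanQueue and an assumed chunk size:

#include <cstddef>
#include <deque>

// Illustrative task descriptor: a slice [from, from + len) of an int array.
struct ArrayChunk { int* base; size_t from; size_t len; };

static const size_t ChunkSize = 512; // assumed tuning constant

void scan_partial_array(ArrayChunk task, std::deque<ArrayChunk>& queue) {
  size_t end = task.from + (task.len < ChunkSize ? task.len : ChunkSize);
  for (size_t i = task.from; i < end; i++) {
    // ... scan element task.base[i] ...
  }
  if (end < task.from + task.len) {
    // The remainder becomes a new task other threads can steal,
    // instead of being scanned by this thread right away.
    queue.push_back(ArrayChunk{task.base, end, task.from + task.len - end});
  }
}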