
src/share/vm/gc/cms/parNewGeneration.hpp

Old version:

  77   // in evacuate_followers.
  78   ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
  79   ParEvacuateFollowersClosure          _evacuate_followers;
  80   DefNewGeneration::IsAliveClosure     _is_alive_closure;
  81   ParScanWeakRefClosure                _scan_weak_ref_closure;
  82   ParKeepAliveClosure                  _keep_alive_closure;
  83 
  84   Space* _to_space;
  85   Space* to_space() { return _to_space; }
  86 
  87   ParNewGeneration* _young_gen;
  88   ParNewGeneration* young_gen() const { return _young_gen; }
  89 
  90   Generation* _old_gen;
  91   Generation* old_gen() { return _old_gen; }
  92 
  93   HeapWord *_young_old_boundary;
  94 
  95   int _hash_seed;
  96   int _thread_num;
  97   ageTable _ageTable;
  98 
  99   bool _to_space_full;
 100 
 101 #if TASKQUEUE_STATS
 102   size_t _term_attempts;
 103   size_t _overflow_refills;
 104   size_t _overflow_refill_objs;
 105 #endif // TASKQUEUE_STATS
 106 
 107   // Stats for promotion failure
 108   PromotionFailedInfo _promotion_failed_info;
 109 
 110   // Timing numbers.
 111   double _start;
 112   double _start_strong_roots;
 113   double _strong_roots_time;
 114   double _start_term;
 115   double _term_time;
 116 
 117   // Helper for trim_queues. Scans subset of an array and makes
 118   // remainder available for work stealing.
 119   void scan_partial_array_and_push_remainder(oop obj);
 120 
 121   // In support of CMS' parallel rescan of survivor space.
 122   ChunkArray* _survivor_chunk_array;
 123   ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
 124 
 125   void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
 126 
 127   ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
 128                      Generation* old_gen_, int thread_num_,
 129                      ObjToScanQueueSet* work_queue_set_,
 130                      Stack<oop, mtGC>* overflow_stacks_,
 131                      size_t desired_plab_sz_,
 132                      ParallelTaskTerminator& term_);
 133 
 134  public:
 135   ageTable* age_table() {return &_ageTable;}
 136 
 137   ObjToScanQueue* work_queue() { return _work_queue; }
 138 
 139   PLAB* to_space_alloc_buffer() {
 140     return &_to_space_alloc_buffer;
 141   }
 142 
 143   ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
 144   DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
 145   ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
 146   ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
 147   ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
 148   ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; };
 149 
 150   // Decrease queue size below "max_size".
 151   void trim_queues(int max_size);
 152 
 153   // Private overflow stack usage
 154   Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
 155   bool take_from_overflow_stack();

New version:

  77   // in evacuate_followers.
  78   ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
  79   ParEvacuateFollowersClosure          _evacuate_followers;
  80   DefNewGeneration::IsAliveClosure     _is_alive_closure;
  81   ParScanWeakRefClosure                _scan_weak_ref_closure;
  82   ParKeepAliveClosure                  _keep_alive_closure;
  83 
  84   Space* _to_space;
  85   Space* to_space() { return _to_space; }
  86 
  87   ParNewGeneration* _young_gen;
  88   ParNewGeneration* young_gen() const { return _young_gen; }
  89 
  90   Generation* _old_gen;
  91   Generation* old_gen() { return _old_gen; }
  92 
  93   HeapWord *_young_old_boundary;
  94 
  95   int _hash_seed;
  96   int _thread_num;
  97   AgeTable _ageTable;
  98 
  99   bool _to_space_full;
 100 
 101 #if TASKQUEUE_STATS
 102   size_t _term_attempts;
 103   size_t _overflow_refills;
 104   size_t _overflow_refill_objs;
 105 #endif // TASKQUEUE_STATS
 106 
 107   // Stats for promotion failure
 108   PromotionFailedInfo _promotion_failed_info;
 109 
 110   // Timing numbers.
 111   double _start;
 112   double _start_strong_roots;
 113   double _strong_roots_time;
 114   double _start_term;
 115   double _term_time;
 116 
 117   // Helper for trim_queues. Scans subset of an array and makes
 118   // remainder available for work stealing.
 119   void scan_partial_array_and_push_remainder(oop obj);
 120 
 121   // In support of CMS' parallel rescan of survivor space.
 122   ChunkArray* _survivor_chunk_array;
 123   ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
 124 
 125   void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
 126 
 127   ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
 128                      Generation* old_gen_, int thread_num_,
 129                      ObjToScanQueueSet* work_queue_set_,
 130                      Stack<oop, mtGC>* overflow_stacks_,
 131                      size_t desired_plab_sz_,
 132                      ParallelTaskTerminator& term_);
 133 
 134  public:
 135   AgeTable* age_table() {return &_ageTable;}
 136 
 137   ObjToScanQueue* work_queue() { return _work_queue; }
 138 
 139   PLAB* to_space_alloc_buffer() {
 140     return &_to_space_alloc_buffer;
 141   }
 142 
 143   ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
 144   DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
 145   ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
 146   ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
 147   ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
 148   ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; };
 149 
 150   // Decrease queue size below "max_size".
 151   void trim_queues(int max_size);
 152 
 153   // Private overflow stack usage
 154   Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
 155   bool take_from_overflow_stack();

