src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp

The old and new versions of this excerpt differ only in the type of the tid parameter
of the remembered-set update helpers. The old declarations were:

  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) { ... }
  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);

In the new version tid is a uint, consistent with the uint queue_num taken by the
constructor and returned by queue_num(). The new version follows; the surrounding
code is unchanged.

  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. base is what we get back from the malloc call
  size_t* _surviving_young_words_base;
  // this points into the array, as we use the first few entries for padding
  size_t* _surviving_young_words;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
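  // (Annotation, not part of the file: on a typical 64-bit build DEFAULT_CACHE_LINE_SIZE
  // is 64 and sizeof(size_t) is 8, so PADDING_ELEM_NUM is 8. The constructor presumably
  // sets _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM, padding
  // the front of the per-worker array so its counters do not share a cache line with
  // whatever the allocator places just before it.)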

  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }

  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }

  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, uint tid);

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, uint tid) {
    // If the new value of the field points to the same region or
    // is the to-space, we don't need to include it in the Rset updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }

 public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
  ~G1ParScanThreadState();

  ageTable*         age_table()       { return &_age_table;       }

  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
    return _alloc_buffers[purpose];
  }

  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
  size_t undo_waste() const                      { return _undo_waste; }

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }

  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    _refs->push(ref);
  }

  template <class T> inline void update_rs(HeapRegion* from, T* p, uint tid);
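  // (Annotation, not part of the file: update_rs() is the public counterpart of the
  // immediate_rs_update()/deferred_rs_update() helpers above. Being declared inline, its
  // definition presumably lives in the corresponding .inline.hpp and selects one of the
  // two strategies.)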

 private:

  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);

 public:

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }

  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }

  int* hash_seed() { return &_hash_seed; }
  uint queue_num() { return _queue_num; }

  size_t term_attempts() const  { return _term_attempts; }
  void note_term_attempt() { _term_attempts++; }
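
A note on the deferred path for readers new to G1's remembered-set code: in
deferred_rs_update() the return value of mark_card_deferred() doubles as a deduplication
check, so a card is pushed onto the dirty card queue only by the first reference that
defers it. The sketch below models that pattern with toy stand-in types; CardTable,
kCardShift, kDeferredBit and the rest are names invented for illustration, not HotSpot
APIs, and the only detail carried over is the 512-byte card granularity used by HotSpot's
card table.

// Toy model of "mark once, enqueue once" deferred card marking. All types and
// names here are stand-ins, not HotSpot code.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

namespace toy {

constexpr std::size_t kCardShift = 9;     // 512-byte cards, as in HotSpot's card table
constexpr std::uint8_t kDeferredBit = 1;  // "this card is already queued for RS update"

struct CardTable {
  const std::uint8_t* heap_base;
  std::vector<std::uint8_t> cards;

  CardTable(const std::uint8_t* base, std::size_t heap_bytes)
      : heap_base(base), cards((heap_bytes >> kCardShift) + 1, 0) {}

  // Card index covering the address p, mirroring index_for(p) in the header above.
  std::size_t index_for(const void* p) const {
    return (static_cast<const std::uint8_t*>(p) - heap_base) >> kCardShift;
  }

  // Returns true only for the first caller to defer this card, so the caller
  // enqueues each card at most once, mirroring mark_card_deferred().
  bool mark_card_deferred(std::size_t idx) {
    if (cards[idx] & kDeferredBit) return false;
    cards[idx] |= kDeferredBit;
    return true;
  }
};

}  // namespace toy

int main() {
  std::vector<std::uint8_t> heap(4096);          // pretend heap
  toy::CardTable ct(heap.data(), heap.size());
  std::vector<std::size_t> dirty_card_queue;     // stand-in for DirtyCardQueue

  // Three field updates; the first two dirty the same 512-byte card.
  for (std::size_t offset : {std::size_t(40), std::size_t(48), std::size_t(2000)}) {
    std::size_t idx = ct.index_for(heap.data() + offset);
    if (ct.mark_card_deferred(idx)) {            // same if() shape as deferred_rs_update()
      dirty_card_queue.push_back(idx);
    }
  }
  std::cout << "cards queued: " << dirty_card_queue.size() << "\n";
  return 0;
}

Running it prints "cards queued: 2", because the first two writes land on the same
512-byte card and only the first of them enqueues it.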