< prev index next >

src/share/vm/gc/g1/g1ParScanThreadState.hpp

Print this page
rev 13170 : [mq]: 8183397-consistent-closure-filtering


  83   }
  84 
  85  public:
  86   G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
  87   virtual ~G1ParScanThreadState();
  88 
  89   void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); } // delegates to the embedded scanner closure
  90 
  91 #ifdef ASSERT // debug builds only: verification helpers used from asserts
  92   bool queue_is_empty() const { return _refs->is_empty(); } // true iff this worker's task queue holds no pending references
  93 
  94   bool verify_ref(narrowOop* ref) const;
  95   bool verify_ref(oop* ref) const;
  96   bool verify_task(StarTask ref) const;
  97 #endif // ASSERT
  98 
  99   template <class T> void do_oop_ext(T* ref);
 100   template <class T> void push_on_queue(T* ref);
 101 
  102   template <class T> void update_rs(HeapRegion* from, T* p, oop o) { // record a remembered-set update for field p (in region 'from') now pointing at object o
  103     // If the new value of the field points to the same region or
  104     // is the to-space, we don't need to include it in the Rset updates.
  105     if (!HeapRegion::is_in_same_region(p, o) && !from->is_young()) { // same-region filtering is inlined here; the revised version of this file moves it to the callers

  106       size_t card_index = ctbs()->index_for(p); // card covering the updated field p
  107       // If the card hasn't been added to the buffer, do it.
  108       if (ctbs()->mark_card_deferred(card_index)) { // enqueue each card at most once
  109         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
  110       }
  111     }
  112   }
 113 
  114   G1EvacuationRootClosures* closures() { return _closures; } // root closures used by this scan state during evacuation
  115   uint worker_id() { return _worker_id; } // id of the owning GC worker (set at construction)
 116 
 117   // Returns the current amount of waste due to alignment or not being able to fit
 118   // objects within LABs and the undo waste.
 119   virtual void waste(size_t& wasted, size_t& undo_wasted);
 120 
  121   size_t* surviving_young_words() { // per-age survivor word counts; the hidden slot 0 holds non-young ("age -1") totals
  122     // We add one to hide entry 0 which accumulates surviving words for
  123     // age -1 regions (i.e. non-young ones)
  124     return _surviving_young_words + 1;
  125   }




  83   }
  84 
  85  public:
  86   G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
  87   virtual ~G1ParScanThreadState();
  88 
  89   void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); } // delegates to the embedded scanner closure
  90 
  91 #ifdef ASSERT // debug builds only: verification helpers used from asserts
  92   bool queue_is_empty() const { return _refs->is_empty(); } // true iff this worker's task queue holds no pending references
  93 
  94   bool verify_ref(narrowOop* ref) const;
  95   bool verify_ref(oop* ref) const;
  96   bool verify_task(StarTask ref) const;
  97 #endif // ASSERT
  98 
  99   template <class T> void do_oop_ext(T* ref);
 100   template <class T> void push_on_queue(T* ref);
 101 
  102   template <class T> void update_rs(HeapRegion* from, T* p, oop o) { // record a remembered-set update for field p (in region 'from') now pointing at object o
        // NOTE(review): the assert condition requires p and o to be in *different*
        // regions, i.e. callers must have filtered out *same-region* references;
        // the message text says "cross-region", which reads inverted relative to
        // the condition -- consider rewording the message.
  103     assert(!HeapRegion::is_in_same_region(p, o), "Caller should have filtered out cross-region references already.");
  104     // If the field originates from the to-space, we don't need to include it
  105     // in the remembered set updates.
  106     if (!from->is_young()) {
  107       size_t card_index = ctbs()->index_for(p); // card covering the updated field p
  108       // If the card hasn't been added to the buffer, do it.
  109       if (ctbs()->mark_card_deferred(card_index)) { // enqueue each card at most once
  110         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
  111       }
  112     }
  113   }
 114 
  115   G1EvacuationRootClosures* closures() { return _closures; } // root closures used by this scan state during evacuation
  116   uint worker_id() { return _worker_id; } // id of the owning GC worker (set at construction)
 117 
 118   // Returns the current amount of waste due to alignment or not being able to fit
 119   // objects within LABs and the undo waste.
 120   virtual void waste(size_t& wasted, size_t& undo_wasted);
 121 
  122   size_t* surviving_young_words() { // per-age survivor word counts; the hidden slot 0 holds non-young ("age -1") totals
  123     // We add one to hide entry 0 which accumulates surviving words for
  124     // age -1 regions (i.e. non-young ones)
  125     return _surviving_young_words + 1;
  126   }


< prev index next >