< prev index next >

src/share/vm/gc/g1/g1ParScanThreadState.hpp

Print this page
rev 10742: Make fields used in lock-free algorithms volatile


  89   void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); }
  90 
  91 #ifdef ASSERT
  92   bool queue_is_empty() const { return _refs->is_empty(); }
  93 
  94   bool verify_ref(narrowOop* ref) const;
  95   bool verify_ref(oop* ref) const;
  96   bool verify_task(StarTask ref) const;
  97 #endif // ASSERT
  98 
  99   template <class T> void do_oop_ext(T* ref);
 100   template <class T> void push_on_queue(T* ref);
 101 
 102   template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
 103     // If the new value of the field points to the same region or
 104     // is the to-space, we don't need to include it in the Rset updates.
 105     if (!HeapRegion::is_in_same_region(p, o) && !from->is_young()) {
 106       size_t card_index = ctbs()->index_for(p);
 107       // If the card hasn't been added to the buffer, do it.
 108       if (ctbs()->mark_card_deferred(card_index)) {
 109         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
 110       }
 111     }
 112   }
 113 
 114   G1EvacuationRootClosures* closures() { return _closures; }
 115   uint worker_id() { return _worker_id; }
 116 
 117   // Returns the current amount of waste due to alignment or not being able to fit
 118   // objects within LABs and the undo waste.
 119   virtual void waste(size_t& wasted, size_t& undo_wasted);
 120 
 121   size_t* surviving_young_words() {
 122     // We add one to hide entry 0 which accumulates surviving words for
 123     // age -1 regions (i.e. non-young ones)
 124     return _surviving_young_words + 1;
 125   }
 126 
 127   void flush(size_t* surviving_young_words);
 128 
 129  private:




  89   void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); }
  90 
  91 #ifdef ASSERT
  92   bool queue_is_empty() const { return _refs->is_empty(); }
  93 
  94   bool verify_ref(narrowOop* ref) const;
  95   bool verify_ref(oop* ref) const;
  96   bool verify_task(StarTask ref) const;
  97 #endif // ASSERT
  98 
  99   template <class T> void do_oop_ext(T* ref);
 100   template <class T> void push_on_queue(T* ref);
 101 
 102   template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
 103     // If the new value of the field points to the same region or
 104     // is the to-space, we don't need to include it in the Rset updates.
 105     if (!HeapRegion::is_in_same_region(p, o) && !from->is_young()) {
 106       size_t card_index = ctbs()->index_for(p);
 107       // If the card hasn't been added to the buffer, do it.
 108       if (ctbs()->mark_card_deferred(card_index)) {
 109         dirty_card_queue().enqueue((volatile jbyte*)ctbs()->byte_for_index(card_index));
 110       }
 111     }
 112   }
 113 
 114   G1EvacuationRootClosures* closures() { return _closures; }
 115   uint worker_id() { return _worker_id; }
 116 
 117   // Returns the current amount of waste due to alignment or not being able to fit
 118   // objects within LABs and the undo waste.
 119   virtual void waste(size_t& wasted, size_t& undo_wasted);
 120 
 121   size_t* surviving_young_words() {
 122     // We add one to hide entry 0 which accumulates surviving words for
 123     // age -1 regions (i.e. non-young ones)
 124     return _surviving_young_words + 1;
 125   }
 126 
 127   void flush(size_t* surviving_young_words);
 128 
 129  private:


< prev index next >