< prev index next >

src/hotspot/share/gc/g1/g1ParScanThreadState.hpp

Print this page
rev 53824 : imported patch 8219096-merge-termination-stats-logging
rev 53825 : [mq]: 8219096-lkorinth-review


 110 
 111   template <class T> void do_oop_ext(T* ref);
 112   template <class T> void push_on_queue(T* ref);
 113 
       // Record the cross-region reference p -> o by dirtying and enqueuing the
       // card covering p, but only if the region containing o maintains a
       // remembered set. Same-region and from-young references must already
       // have been filtered out by the caller (enforced by the asserts below).
 114   template <class T> void enqueue_card_if_tracked(T* p, oop o) {
 115     assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
 116     assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
         // Nothing to do if the destination region's remembered set is not
         // being tracked.
 117     if (!_g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
 118       return;
 119     }
 120     size_t card_index = ct()->index_for(p);
 121     // If the card hasn't been added to the buffer, do it.
 122     if (ct()->mark_card_deferred(card_index)) {
 123       dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
 124     }
 125   }
 126 
       // Accessors: the root closures used by this thread state, and the id of
       // the worker thread it belongs to.
 127   G1EvacuationRootClosures* closures() { return _closures; }
 128   uint worker_id() { return _worker_id; }
 129 
 130   size_t lab_waste() const;
 131   size_t lab_undo_waste() const;
 132 
       // Returns the surviving-words-per-age array, offset past the slot that
       // accumulates words for non-young regions.
 133   size_t* surviving_young_words() {
 134     // We add one to hide entry 0 which accumulates surviving words for
 135     // age -1 regions (i.e. non-young ones)
 136     return _surviving_young_words + 1;
 137   }
 138 
 139   void flush(size_t* surviving_young_words);
 140 
 141 private:
 142   #define G1_PARTIAL_ARRAY_MASK 0x2
 143 
       // True iff the G1_PARTIAL_ARRAY_MASK tag bit is set in the pointer bits
       // of ref, i.e. ref encodes a partial-array task rather than a plain
       // reference location.
 144   inline bool has_partial_array_mask(oop* ref) const {
 145     return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
 146   }
 147 
 148   // We never encode partial array oops as narrowOop*, so return false immediately.
 149   // This allows the compiler to create optimized code when popping references from
 150   // the work queue.
 151   inline bool has_partial_array_mask(narrowOop* ref) const {




 110 
 111   template <class T> void do_oop_ext(T* ref);
 112   template <class T> void push_on_queue(T* ref);
 113 
       // Record the cross-region reference p -> o by dirtying and enqueuing the
       // card covering p, but only if the region containing o maintains a
       // remembered set. Same-region and from-young references must already
       // have been filtered out by the caller (enforced by the asserts below).
 114   template <class T> void enqueue_card_if_tracked(T* p, oop o) {
 115     assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
 116     assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
         // Nothing to do if the destination region's remembered set is not
         // being tracked.
 117     if (!_g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
 118       return;
 119     }
 120     size_t card_index = ct()->index_for(p);
 121     // If the card hasn't been added to the buffer, do it.
 122     if (ct()->mark_card_deferred(card_index)) {
 123       dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
 124     }
 125   }
 126 
       // Accessors: the root closures used by this thread state, and the id of
       // the worker thread it belongs to.
 127   G1EvacuationRootClosures* closures() { return _closures; }
 128   uint worker_id() { return _worker_id; }
 129 
 130   size_t lab_waste_words() const;
 131   size_t lab_undo_waste_words() const;
 132 
       // Returns the surviving-words-per-age array, offset past the slot that
       // accumulates words for non-young regions.
 133   size_t* surviving_young_words() {
 134     // We add one to hide entry 0 which accumulates surviving words for
 135     // age -1 regions (i.e. non-young ones)
 136     return _surviving_young_words + 1;
 137   }
 138 
 139   void flush(size_t* surviving_young_words);
 140 
 141 private:
 142   #define G1_PARTIAL_ARRAY_MASK 0x2
 143 
       // True iff the G1_PARTIAL_ARRAY_MASK tag bit is set in the pointer bits
       // of ref, i.e. ref encodes a partial-array task rather than a plain
       // reference location.
 144   inline bool has_partial_array_mask(oop* ref) const {
 145     return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
 146   }
 147 
 148   // We never encode partial array oops as narrowOop*, so return false immediately.
 149   // This allows the compiler to create optimized code when popping references from
 150   // the work queue.
 151   inline bool has_partial_array_mask(narrowOop* ref) const {


< prev index next >