87 return _dest[original.value()];
88 }
89
 public:
  // worker_id: index of the GC worker thread that owns this per-thread state.
  // young_cset_length: number of young regions in the collection set; presumably
  // sizes the surviving-young-words table below -- confirm in the .cpp file.
  G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
  virtual ~G1ParScanThreadState();

94 void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
95
#ifdef ASSERT
  // Returns true iff this thread's local task queue holds no outstanding work.
  bool queue_is_empty() const { return _refs->is_empty(); }

  // Debug-only sanity checks on individual references and queued tasks.
  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void do_oop_ext(T* ref);
  // Push the reference at ref onto this thread's work queue.
  template <class T> void push_on_queue(T* ref);
106
  // Conditionally record, via the dirty card queue, the cross-region reference
  // at field p (located in region 'from') to object o. The caller must already
  // have established that p and o live in different regions.
  template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
    assert(!HeapRegion::is_in_same_region(p, o), "Caller should have filtered out cross-region references already.");
    // If the field originates from the to-space, we don't need to include it
    // in the remembered set updates. Also, if we are not tracking the remembered
    // set in the destination region, do not bother either.
    if (!from->is_young() && _g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
      size_t card_index = ct()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      // mark_card_deferred presumably returns true only on the first marking of
      // the card, so each card is enqueued at most once -- confirm in the card
      // table implementation.
      if (ct()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
      }
    }
  }
120
121 G1EvacuationRootClosures* closures() { return _closures; }
122 uint worker_id() { return _worker_id; }
123
  // Returns, via the out parameters, the current amount of waste: 'wasted' is
  // space lost to alignment or to objects not fitting within LABs; 'undo_wasted'
  // is space lost to undone (rolled-back) allocations.
  virtual void waste(size_t& wasted, size_t& undo_wasted);
127
128 size_t* surviving_young_words() {
129 // We add one to hide entry 0 which accumulates surviving words for
130 // age -1 regions (i.e. non-young ones)
131 return _surviving_young_words + 1;
132 }
133
  // Flush this thread's statistics, accumulating its surviving-young-words
  // counts into the given global array -- presumably indexed like the table
  // above; confirm against the .cpp implementation.
  void flush(size_t* surviving_young_words);

 private:
  // Tag bit set in a queued task pointer; presumably distinguishes partial
  // object-array scan tasks from plain references (uses are outside this chunk).
#define G1_PARTIAL_ARRAY_MASK 0x2
|
87 return _dest[original.value()];
88 }
89
 public:
  // worker_id: index of the GC worker thread that owns this per-thread state.
  // young_cset_length: number of young regions in the collection set; presumably
  // sizes the surviving-young-words table below -- confirm in the .cpp file.
  G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
  virtual ~G1ParScanThreadState();

94 void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
95
#ifdef ASSERT
  // Returns true iff this thread's local task queue holds no outstanding work.
  bool queue_is_empty() const { return _refs->is_empty(); }

  // Debug-only sanity checks on individual references and queued tasks.
  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void do_oop_ext(T* ref);
  // Push the reference at ref onto this thread's work queue.
  template <class T> void push_on_queue(T* ref);
106
  // Enqueue the card covering field p (which references object o) on the dirty
  // card queue, but only if the destination region's remembered set is being
  // tracked. The caller must already have filtered out same-region and
  // from-young references, as checked by the asserts below.
  template <class T> void enqueue_card_if_tracked(T* p, oop o) {
    assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
    assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
    // Nothing to remember if the destination region does not track its
    // remembered set.
    if (!_g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
      return;
    }
    size_t card_index = ct()->index_for(p);
    // If the card hasn't been added to the buffer, do it.
    // mark_card_deferred presumably returns true only on the first marking of
    // the card, so each card is enqueued at most once -- confirm in the card
    // table implementation.
    if (ct()->mark_card_deferred(card_index)) {
      dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
    }
  }
119
120 G1EvacuationRootClosures* closures() { return _closures; }
121 uint worker_id() { return _worker_id; }
122
  // Returns, via the out parameters, the current amount of waste: 'wasted' is
  // space lost to alignment or to objects not fitting within LABs; 'undo_wasted'
  // is space lost to undone (rolled-back) allocations.
  virtual void waste(size_t& wasted, size_t& undo_wasted);
126
127 size_t* surviving_young_words() {
128 // We add one to hide entry 0 which accumulates surviving words for
129 // age -1 regions (i.e. non-young ones)
130 return _surviving_young_words + 1;
131 }
132
  // Flush this thread's statistics, accumulating its surviving-young-words
  // counts into the given global array -- presumably indexed like the table
  // above; confirm against the .cpp implementation.
  void flush(size_t* surviving_young_words);

 private:
  // Tag bit set in a queued task pointer; presumably distinguishes partial
  // object-array scan tasks from plain references (uses are outside this chunk).
#define G1_PARTIAL_ARRAY_MASK 0x2
|