
  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue() { return _dcq; }
  G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }

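  // Hand the reference at p straight to the remembered set code instead of
  // deferring the update through the dirty card queue.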
  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points to the same region or
    // is the to-space, we don't need to include it in the Rset updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }

 public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
  ~G1ParScanThreadState() {
    retire_alloc_buffers();
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
  }

  RefToScanQueue* refs() { return _refs; }
  ageTable* age_table() { return &_age_table; }

  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
    return _alloc_buffers[purpose];
  }

  size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
  size_t undo_waste() const { return _undo_waste; }

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }

  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    _refs->push(ref);
  }

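  // Update the remembered set for the reference at p, either immediately or by
  // enqueueing a dirty card, depending on whether deferred RSet updates are in
  // use (see immediate_rs_update() and deferred_rs_update() above).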
  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);

 private:

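  // GC allocation buffer support: allocate() tries the current buffer for the
  // given purpose first and falls back to allocate_slow(), which retires the
  // buffer and gets a new one, or allocates directly in the heap.
  // undo_allocation() gives the space back if the copy ends up not being used.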
  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);

 public:

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }

  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }

  int* hash_seed() { return &_hash_seed; }
  uint queue_num() { return _queue_num; }

  size_t term_attempts() const { return _term_attempts; }
  void note_term_attempt() { _term_attempts++; }

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }

 private:
  // We never encode partial array tasks as narrowOop*, so this overload can
  // unconditionally return false (see the comment on set_partial_array_mask()
  // below).
  inline bool has_partial_array_mask(narrowOop* ref) const {
    return false;
  }

  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
  // We always encode partial arrays as regular oops, to allow the
  // specialization of has_partial_array_mask() for narrowOops above.
  // This means that unintentional use of this method with narrowOops is caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }

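  // Process one chunk of an object array whose scanning has been split into
  // partial array tasks, pushing a task for the remaining chunks back on the
  // queue.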
  inline void do_oop_partial_array(oop* p);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> void do_oop_evac(T* p, HeapRegion* from);

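  // Process a single entry popped from the work queue: partial array tasks are
  // handled by do_oop_partial_array(), all other references by do_oop_evac().
  // The StarTask overload unwraps the task and dispatches to the matching
  // narrowOop* or oop* version.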
  template <class T> inline void deal_with_reference(T* ref_to_scan);

  inline void deal_with_reference(StarTask ref);
 public:

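  // Copy obj into to-space (a survivor or old region, depending on its age),
  // install the forwarding pointer, and return the new location. If another
  // worker installs the forwarding pointer first, its forwardee is returned
  // instead.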
  oop copy_to_survivor_space(oop const obj);

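  // Drain the local work queue, applying deal_with_reference() to each entry
  // until the queue is empty.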
  void trim_queue();

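  // Repeatedly steal references from other workers' queues, process them, and
  // drain the local queue via trim_queue(), until no more work can be found.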
  void steal_and_trim_queue(RefToScanQueueSet *task_queues);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP