105 }
106
// Walks a freshly written reference-array range [start, start + count),
// treating it as an array of T (narrowOop when compressed oops are on,
// oop otherwise) and applying the update-refs closure to every slot.
// UPDATE_MATRIX selects connection-matrix maintenance; STOREVAL_WRITE_BARRIER
// selects the storeval write-barrier variant of the closure.
107 template <class T, bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER>
108 void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
109 assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
// A single closure instance is reused across all slots.
110 ShenandoahUpdateRefsForOopClosure<UPDATE_MATRIX, STOREVAL_WRITE_BARRIER> cl;
111 T* dst = (T*) start;
112 for (size_t i = 0; i < count; i++) {
113 cl.do_oop(dst++);
114 }
115 }
116
// Post-write barrier for bulk reference-array stores (e.g. arraycopy).
// Bails out early when there is nothing to do (empty copy, clone barrier
// disabled, or no update-refs barrier currently needed); otherwise updates
// each written slot via write_ref_array_loop, with the template flags
// chosen from the current GC phase and runtime flags.
117 void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
118 assert(UseShenandoahGC, "should be enabled");
119 if (count == 0) return;
120 if (!ShenandoahCloneBarrier) return;
121
122 if (!need_update_refs_barrier()) return;
123
124 if (_heap->is_concurrent_traversal_in_progress()) {
// During concurrent traversal, large arrays are handed off to the
// traversal GC for deferred processing rather than walked inline.
125 if (count > ShenandoahEnqueueArrayCopyThreshold) {
126 _heap->traversal_gc()->push_arraycopy(start, count);
127 } else {
// Inline processing uses the storeval write barrier (wb = true),
// which may evacuate; guard against evacuation OOM.
128 ShenandoahEvacOOMScope oom_evac_scope;
129 if (UseShenandoahMatrix) {
130 if (UseCompressedOops) {
131 write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ true>(start, count);
132 } else {
133 write_ref_array_loop<oop, /* matrix = */ true, /* wb = */ true>(start, count);
134 }
135 } else {
136 if (UseCompressedOops) {
137 write_ref_array_loop<narrowOop, /* matrix = */ false, /* wb = */ true>(start, count);
138 } else {
139 write_ref_array_loop<oop, /* matrix = */ false, /* wb = */ true>(start, count);
140 }
141 }
142 }
// Outside traversal, no storeval barrier is needed (wb = false) and no
// evacuation-OOM scope is taken.
143 } else {
144 if (UseShenandoahMatrix) {
145 if (UseCompressedOops) {
146 write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ false>(start, count);
147 } else {
148 write_ref_array_loop<oop, /* matrix = */ true, /* wb = */ false>(start, count);
149 }
150 } else {
151 if (UseCompressedOops) {
152 write_ref_array_loop<narrowOop, /* matrix = */ false, /* wb = */ false>(start, count);
153 } else {
154 write_ref_array_loop<oop, /* matrix = */ false, /* wb = */ false>(start, count);
155 }
156 }
157 }
158 }
159
160 template <class T>
161 void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
162 shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
213
// Slow-path hook for a single reference store. In this implementation it
// performs verification only (no barrier work): the store location must not
// be in the collection set, and — unless the value is NULL, the cycle was
// cancelled, or concurrent mark is not running — the stored value must be
// neither forwarded nor in the collection set. The 'release' flag is unused
// here; presumably it matters only for the real barrier paths elsewhere —
// TODO confirm.
214 void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
215 shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
216 shenandoah_assert_not_forwarded_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
217 shenandoah_assert_not_in_cset_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
218 }
219
// Post-write barrier for a whole-object region write (object clone).
// Iterates every reference field of the newly written object with the
// update-refs closure, configured from the current GC phase and flags.
220 void ShenandoahBarrierSet::write_region(MemRegion mr) {
221 assert(UseShenandoahGC, "should be enabled");
222 if (!ShenandoahCloneBarrier) return;
223 if (! need_update_refs_barrier()) return;
224
225 // This is called for cloning an object (see jvm.cpp) after the clone
226 // has been made. We are not interested in any 'previous value' because
227 // it would be NULL in any case. But we *are* interested in any oop*
228 // that potentially need to be updated.
229
// The region is assumed to start exactly at the cloned object's header.
230 oop obj = oop(mr.start());
231 assert(oopDesc::is_oop(obj), "must be an oop");
232 if (_heap->is_concurrent_traversal_in_progress()) {
// Large clones are deferred to the traversal GC (count 0 marks this as
// a whole-object entry rather than an array slice — TODO confirm the
// push_arraycopy convention against the traversal GC implementation).
233 if ((size_t) obj->size() > ShenandoahEnqueueArrayCopyThreshold) {
234 _heap->traversal_gc()->push_arraycopy(mr.start(), 0);
235 } else {
// Inline processing uses the storeval barrier (wb = true), which may
// evacuate; guard against evacuation OOM.
236 ShenandoahEvacOOMScope oom_evac_scope;
237 if (UseShenandoahMatrix) {
238 ShenandoahUpdateRefsForOopClosure</* matrix = */ true, /* wb = */ true> cl;
239 obj->oop_iterate(&cl);
240 } else {
241 ShenandoahUpdateRefsForOopClosure</* matrix = */ false, /* wb = */ true> cl;
242 obj->oop_iterate(&cl);
243 }
244 }
// Outside traversal, no storeval barrier is needed (wb = false).
245 } else {
246 if (UseShenandoahMatrix) {
247 ShenandoahUpdateRefsForOopClosure</* matrix = */ true, /* wb = */ false> cl;
248 obj->oop_iterate(&cl);
249 } else {
250 ShenandoahUpdateRefsForOopClosure</* matrix = */ false, /* wb = */ false> cl;
251 obj->oop_iterate(&cl);
252 }
253 }
254 }
255
256 oop ShenandoahBarrierSet::read_barrier(oop src) {
257 // Check for forwarded objects, because on Full GC path we might deal with
258 // non-trivial fwdptrs that contain Full GC specific metadata. We could check
259 // for is_full_gc_in_progress(), but this also covers the case of stable heap,
260 // which provides a bit of performance improvement.
261 if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
262 return ShenandoahBarrierSet::resolve_forwarded(src);
263 } else {
|
105 }
106
// Walks a freshly written reference-array range [start, start + count),
// treating it as an array of T (narrowOop when compressed oops are on,
// oop otherwise) and applying the update-refs closure to every slot.
// UPDATE_MATRIX selects connection-matrix maintenance; STOREVAL_WRITE_BARRIER
// selects the storeval write-barrier variant of the closure.
107 template <class T, bool UPDATE_MATRIX, bool STOREVAL_WRITE_BARRIER>
108 void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) {
109 assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled");
// A single closure instance is reused across all slots.
110 ShenandoahUpdateRefsForOopClosure<UPDATE_MATRIX, STOREVAL_WRITE_BARRIER> cl;
111 T* dst = (T*) start;
112 for (size_t i = 0; i < count; i++) {
113 cl.do_oop(dst++);
114 }
115 }
116
// Post-write barrier for bulk reference-array stores (e.g. arraycopy).
// Bails out early when there is nothing to do (empty copy, clone barrier
// disabled, or no update-refs barrier currently needed); otherwise updates
// each written slot via write_ref_array_loop, with the template flags
// chosen from the current GC phase and runtime flags. In this revision the
// traversal path always processes the array inline (no deferred enqueue).
117 void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) {
118 assert(UseShenandoahGC, "should be enabled");
119 if (count == 0) return;
120 if (!ShenandoahCloneBarrier) return;
121
122 if (!need_update_refs_barrier()) return;
123
124 if (_heap->is_concurrent_traversal_in_progress()) {
// The traversal path uses the storeval write barrier (wb = true), which
// may evacuate; guard against evacuation OOM.
125 ShenandoahEvacOOMScope oom_evac_scope;
126 if (UseShenandoahMatrix) {
127 if (UseCompressedOops) {
128 write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ true>(start, count);
129 } else {
130 write_ref_array_loop<oop, /* matrix = */ true, /* wb = */ true>(start, count);
131 }
132 } else {
133 if (UseCompressedOops) {
134 write_ref_array_loop<narrowOop, /* matrix = */ false, /* wb = */ true>(start, count);
135 } else {
136 write_ref_array_loop<oop, /* matrix = */ false, /* wb = */ true>(start, count);
137 }
138 }
// Outside traversal, no storeval barrier is needed (wb = false) and no
// evacuation-OOM scope is taken.
139 } else {
140 if (UseShenandoahMatrix) {
141 if (UseCompressedOops) {
142 write_ref_array_loop<narrowOop, /* matrix = */ true, /* wb = */ false>(start, count);
143 } else {
144 write_ref_array_loop<oop, /* matrix = */ true, /* wb = */ false>(start, count);
145 }
146 } else {
147 if (UseCompressedOops) {
148 write_ref_array_loop<narrowOop, /* matrix = */ false, /* wb = */ false>(start, count);
149 } else {
150 write_ref_array_loop<oop, /* matrix = */ false, /* wb = */ false>(start, count);
151 }
152 }
153 }
154 }
155
156 template <class T>
157 void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
158 shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc());
209
// Slow-path hook for a single reference store. In this implementation it
// performs verification only (no barrier work): the store location must not
// be in the collection set, and — unless the value is NULL, the cycle was
// cancelled, or concurrent mark is not running — the stored value must be
// neither forwarded nor in the collection set. The 'release' flag is unused
// here; presumably it matters only for the real barrier paths elsewhere —
// TODO confirm.
210 void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) {
211 shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc());
212 shenandoah_assert_not_forwarded_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
213 shenandoah_assert_not_in_cset_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress());
214 }
215
// Post-write barrier for a whole-object region write (object clone).
// Iterates every reference field of the newly written object with the
// update-refs closure, configured from the current GC phase and flags.
// In this revision the traversal path always processes the object inline
// (no deferred enqueue).
216 void ShenandoahBarrierSet::write_region(MemRegion mr) {
217 assert(UseShenandoahGC, "should be enabled");
218 if (!ShenandoahCloneBarrier) return;
219 if (! need_update_refs_barrier()) return;
220
221 // This is called for cloning an object (see jvm.cpp) after the clone
222 // has been made. We are not interested in any 'previous value' because
223 // it would be NULL in any case. But we *are* interested in any oop*
224 // that potentially need to be updated.
225
// The region is assumed to start exactly at the cloned object's header.
226 oop obj = oop(mr.start());
227 assert(oopDesc::is_oop(obj), "must be an oop");
228 if (_heap->is_concurrent_traversal_in_progress()) {
// The traversal path uses the storeval barrier (wb = true), which may
// evacuate; guard against evacuation OOM.
229 ShenandoahEvacOOMScope oom_evac_scope;
230 if (UseShenandoahMatrix) {
231 ShenandoahUpdateRefsForOopClosure</* matrix = */ true, /* wb = */ true> cl;
232 obj->oop_iterate(&cl);
233 } else {
234 ShenandoahUpdateRefsForOopClosure</* matrix = */ false, /* wb = */ true> cl;
235 obj->oop_iterate(&cl);
236 }
// Outside traversal, no storeval barrier is needed (wb = false).
237 } else {
238 if (UseShenandoahMatrix) {
239 ShenandoahUpdateRefsForOopClosure</* matrix = */ true, /* wb = */ false> cl;
240 obj->oop_iterate(&cl);
241 } else {
242 ShenandoahUpdateRefsForOopClosure</* matrix = */ false, /* wb = */ false> cl;
243 obj->oop_iterate(&cl);
244 }
245 }
246 }
247
248 oop ShenandoahBarrierSet::read_barrier(oop src) {
249 // Check for forwarded objects, because on Full GC path we might deal with
250 // non-trivial fwdptrs that contain Full GC specific metadata. We could check
251 // for is_full_gc_in_progress(), but this also covers the case of stable heap,
252 // which provides a bit of performance improvement.
253 if (ShenandoahReadBarrier && _heap->has_forwarded_objects()) {
254 return ShenandoahBarrierSet::resolve_forwarded(src);
255 } else {
|