180 if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
181 // E.g. during evacuation.
182 return forwarded_oop;
183 }
184
185 assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");
186
187 log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
188 p2i(p), p2i(heap_oop), p2i(forwarded_oop));
189
190 assert(forwarded_oop->is_oop(), "oop required");
191 assert(is_in(forwarded_oop), "forwardee must be in heap");
192 assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
193 // If this fails, another thread wrote to p before us, it will be logged in SATB and the
194 // reference will be updated later.
195 oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
196
197 if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
198 return forwarded_oop;
199 } else {
200 return NULL;
201 }
202 } else {
203 assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
204 "expect not forwarded");
205 return heap_oop;
206 }
207 }
208
// Has cancellation of the concurrent GC cycle been requested?
// The flag is set via try_cancel_concgc() (CAS to 1) and cleared via
// clear_cancelled_concgc(); load_acquire pairs with those writes so the
// reader observes stores that happened before cancellation was signalled.
209 inline bool ShenandoahHeap::cancelled_concgc() const {
210   return OrderAccess::load_acquire((jbyte*) &_cancelled_concgc) == 1;
211 }
212
// Attempt to transition the cancellation flag from 0 to 1.
// Returns true iff this caller won the race (saw the old value 0) and is
// therefore responsible for acting on the cancellation; losers get false.
213 inline bool ShenandoahHeap::try_cancel_concgc() {
214   return Atomic::cmpxchg(1, &_cancelled_concgc, 0) == 0;
215 }
216
// Reset the cancellation flag back to 0.
// release_store_fence both publishes the reset to other threads and
// prevents subsequent accesses from floating above the store.
217 inline void ShenandoahHeap::clear_cancelled_concgc() {
218   OrderAccess::release_store_fence(&_cancelled_concgc, 0);
219 }
403 inline bool ShenandoahHeap::is_evacuation_in_progress() {
404 return _evacuation_in_progress != 0;
405 }
406
407 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
408 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
409 HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
410 bool alloc_after_mark_start = addr >= top_at_mark_start;
411 return alloc_after_mark_start;
412 }
413
414 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
415 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
416 HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
417 bool alloc_after_mark_start = addr >= top_at_mark_start;
418 return alloc_after_mark_start;
419 }
420
421 template<class T>
422 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
423 assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
424
425 CMBitMap* mark_bit_map = _complete_mark_bit_map;
426 HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());
427
428 size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
429 size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
430 HeapWord* start = region->bottom() + BrooksPointer::word_size();
431
432 HeapWord* limit = region->top();
433 HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
434 HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
435
436 intx dist = ShenandoahMarkScanPrefetch;
437 if (dist > 0) {
438 // Batched scan that prefetches the oop data, anticipating the access to
439 // either header, oop field, or forwarding pointer. Note that we cannot
440 // touch anything in oop, while it still being prefetched to get enough
441 // time for prefetch to work. This is why we try to scan the bitmap linearly,
442 // disregarding the object size. However, since we know forwarding pointer
443 // precedes the object, we can skip over it. Once we cannot trust the bitmap,
444 // there is no point for prefetching the oop contents, as oop->size() will
445 // touch it prematurely.
446
447 // No variable-length arrays in standard C++, have enough slots to fit
448 // the prefetch distance.
449 static const int SLOT_COUNT = 256;
450 guarantee(dist <= SLOT_COUNT, "adjust slot count");
451 oop slots[SLOT_COUNT];
452
|
180 if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) {
181 // E.g. during evacuation.
182 return forwarded_oop;
183 }
184
185 assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");
186
187 log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
188 p2i(p), p2i(heap_oop), p2i(forwarded_oop));
189
190 assert(forwarded_oop->is_oop(), "oop required");
191 assert(is_in(forwarded_oop), "forwardee must be in heap");
192 assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
193 // If this fails, another thread wrote to p before us, it will be logged in SATB and the
194 // reference will be updated later.
195 oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
196
197 if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
198 return forwarded_oop;
199 } else {
200 assert(oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
201 "expect not forwarded");
202 return NULL;
203 }
204 } else {
205 assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
206 "expect not forwarded");
207 return heap_oop;
208 }
209 }
210
// Has cancellation of the concurrent GC cycle been requested?
// Set via try_cancel_concgc() (CAS to 1), cleared via clear_cancelled_concgc();
// load_acquire pairs with those writes so the reader also observes stores
// made before cancellation was signalled.
211 inline bool ShenandoahHeap::cancelled_concgc() const {
212   return OrderAccess::load_acquire((jbyte*) &_cancelled_concgc) == 1;
213 }
214
// Attempt to transition the cancellation flag from 0 to 1.
// Returns true iff this caller won the race (CAS observed the old value 0)
// and is therefore responsible for acting on the cancellation.
215 inline bool ShenandoahHeap::try_cancel_concgc() {
216   return Atomic::cmpxchg(1, &_cancelled_concgc, 0) == 0;
217 }
218
// Reset the cancellation flag back to 0.
// release_store_fence publishes the reset and keeps later accesses from
// being reordered above the store.
219 inline void ShenandoahHeap::clear_cancelled_concgc() {
220   OrderAccess::release_store_fence(&_cancelled_concgc, 0);
221 }
405 inline bool ShenandoahHeap::is_evacuation_in_progress() {
406 return _evacuation_in_progress != 0;
407 }
408
409 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
410 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
411 HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
412 bool alloc_after_mark_start = addr >= top_at_mark_start;
413 return alloc_after_mark_start;
414 }
415
416 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
417 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
418 HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
419 bool alloc_after_mark_start = addr >= top_at_mark_start;
420 return alloc_after_mark_start;
421 }
422
423 template<class T>
424 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
425 marked_object_iterate(region, cl, region->top());
426 }
427
428 template<class T>
429 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
430 assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
431
432 CMBitMap* mark_bit_map = _complete_mark_bit_map;
433 HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());
434
435 size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
436 size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
437 HeapWord* start = region->bottom() + BrooksPointer::word_size();
438
439 HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
440 HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
441
442 intx dist = ShenandoahMarkScanPrefetch;
443 if (dist > 0) {
444 // Batched scan that prefetches the oop data, anticipating the access to
445 // either header, oop field, or forwarding pointer. Note that we cannot
446 // touch anything in oop, while it still being prefetched to get enough
447 // time for prefetch to work. This is why we try to scan the bitmap linearly,
448 // disregarding the object size. However, since we know forwarding pointer
449 // precedes the object, we can skip over it. Once we cannot trust the bitmap,
450 // there is no point for prefetching the oop contents, as oop->size() will
451 // touch it prematurely.
452
453 // No variable-length arrays in standard C++, have enough slots to fit
454 // the prefetch distance.
455 static const int SLOT_COUNT = 256;
456 guarantee(dist <= SLOT_COUNT, "adjust slot count");
457 oop slots[SLOT_COUNT];
458
|