174 }
175 #endif
176 assert(is_in(heap_oop), "only ever call this on objects in the heap");
177 if (in_collection_set(heap_oop)) {
178 oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
179 assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");
180
181 log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
182 p2i(p), p2i(heap_oop), p2i(forwarded_oop));
183
184 assert(forwarded_oop->is_oop(), "oop required");
185 assert(is_in(forwarded_oop), "forwardee must be in heap");
186 assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
187 // If this fails, another thread wrote to p before us, it will be logged in SATB and the
188 // reference be updated later.
189 oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
190
191 if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
192 return forwarded_oop;
193 } else {
194 return NULL;
195 }
196 } else {
197 assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
198 "expect not forwarded");
199 return heap_oop;
200 }
201 }
202
// Returns true if the current concurrent GC cycle has been cancelled.
// The load_acquire pairs with the releasing store in clear_cancelled_concgc()
// (and the CAS in try_cancel_concgc()) so the reader also observes writes
// made before the flag changed.
inline bool ShenandoahHeap::cancelled_concgc() const {
  return OrderAccess::load_acquire((jbyte*) &_cancelled_concgc) == 1;
}
206
// Atomically transitions _cancelled_concgc from 0 to 1.
// Returns true only for the single thread whose CAS succeeds (saw the old
// value 0), so exactly one caller wins the right to perform cancellation.
inline bool ShenandoahHeap::try_cancel_concgc() {
  return Atomic::cmpxchg(1, &_cancelled_concgc, 0) == 0;
}
210
// Resets the cancellation flag. release_store_fence publishes the reset to
// other threads and orders it before any subsequent memory operations of
// the resetting thread.
inline void ShenandoahHeap::clear_cancelled_concgc() {
  OrderAccess::release_store_fence(&_cancelled_concgc, 0);
}
// Returns true while concurrent evacuation is running.
// NOTE(review): this is a plain (non-acquire) read of _evacuation_in_progress;
// presumably the flag is published with sufficient synchronization at its
// write sites — confirm against the setters.
inline bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress != 0;
}
336
// Returns true if addr lies at or above the next-cycle top-at-mark-start
// (TAMS) of its region, i.e. the word was allocated after the next marking
// cycle started.
inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
  // Shifting the raw address by the region size yields the region index;
  // assumes _next_top_at_mark_starts is biased so that raw-address indexing
  // is valid — TODO confirm against the field's initialization.
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
  HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}
343
// Returns true if addr lies at or above the completed-cycle top-at-mark-start
// (TAMS) of its region, i.e. the word was allocated after the completed
// marking cycle started.
inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
  // Same biased-array indexing scheme as allocated_after_next_mark_start();
  // assumes _complete_top_at_mark_starts supports raw-address indexing —
  // TODO confirm against the field's initialization.
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
  HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
  return alloc_after_mark_start;
}
350
351 template<class T>
352 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
353 assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
354
355 CMBitMap* mark_bit_map = _complete_mark_bit_map;
356 HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());
357
358 size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
359 size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
360 HeapWord* start = region->bottom() + BrooksPointer::word_size();
361
362 HeapWord* limit = region->top();
363 HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
364 HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
365
366 intx dist = ShenandoahMarkScanPrefetch;
367 if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either the header, an oop field, or the forwarding pointer. Note that we
    // cannot touch anything in the oop while it is still being prefetched, to
    // give the prefetch enough time to work. This is why we try to scan the
    // bitmap linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we can
    // no longer trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.
376
377 // No variable-length arrays in standard C++, have enough slots to fit
378 // the prefetch distance.
379 static const int SLOT_COUNT = 256;
380 guarantee(dist <= SLOT_COUNT, "adjust slot count");
381 oop slots[SLOT_COUNT];
382
419 addr += size + skip_objsize_delta;
420 if (addr < top_at_mark_start) {
421 addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
422 }
423 }
424 }
425 }
426
// Applies closure cl to a single object known to be marked in the completed
// mark bitmap. All checks are debug-only sanity assertions; in product builds
// this is a plain dispatch to cl->do_object().
template<class T>
inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
#ifdef ASSERT
  assert(!oopDesc::is_null(obj), "sanity");
  assert(obj->is_oop(), "sanity");
  assert(is_in(obj), "sanity");
  // Only the completed bitmap is supported; callers must not pass the
  // in-progress (next) bitmap.
  assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
  assert(is_marked_complete(obj), "object expected to be marked");
#endif
  cl->do_object(obj);
}
438
439 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
|
174 }
175 #endif
176 assert(is_in(heap_oop), "only ever call this on objects in the heap");
177 if (in_collection_set(heap_oop)) {
178 oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
179 assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");
180
181 log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
182 p2i(p), p2i(heap_oop), p2i(forwarded_oop));
183
184 assert(forwarded_oop->is_oop(), "oop required");
185 assert(is_in(forwarded_oop), "forwardee must be in heap");
186 assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
187 // If this fails, another thread wrote to p before us, it will be logged in SATB and the
188 // reference be updated later.
189 oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
190
191 if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
192 return forwarded_oop;
193 } else {
194 // Note: we used to assert the following here. This doesn't work because sometimes, during
195 // marking/updating-refs, it can happen that a Java thread beats us with an arraycopy,
196 // which first copies the array, which potentially contains from-space refs, and only afterwards
197 // updates all from-space refs to to-space refs, which leaves a short window where the new array
198 // elements can be from-space.
199 // assert(oopDesc::is_null(result) ||
200 // oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)),
201 // "expect not forwarded");
202 return NULL;
203 }
204 } else {
205 assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
206 "expect not forwarded");
207 return heap_oop;
208 }
209 }
210
// Returns true if the current concurrent GC cycle has been cancelled.
// The load_acquire pairs with the releasing store in clear_cancelled_concgc()
// (and the CAS in try_cancel_concgc()) so the reader also observes writes
// made before the flag changed.
inline bool ShenandoahHeap::cancelled_concgc() const {
  return OrderAccess::load_acquire((jbyte*) &_cancelled_concgc) == 1;
}
214
// Atomically transitions _cancelled_concgc from 0 to 1.
// Returns true only for the single thread whose CAS succeeds (saw the old
// value 0), so exactly one caller wins the right to perform cancellation.
inline bool ShenandoahHeap::try_cancel_concgc() {
  return Atomic::cmpxchg(1, &_cancelled_concgc, 0) == 0;
}
218
// Resets the cancellation flag. release_store_fence publishes the reset to
// other threads and orders it before any subsequent memory operations of
// the resetting thread.
inline void ShenandoahHeap::clear_cancelled_concgc() {
  OrderAccess::release_store_fence(&_cancelled_concgc, 0);
}
// Returns true while concurrent evacuation is running.
// NOTE(review): this is a plain (non-acquire) read of _evacuation_in_progress;
// presumably the flag is published with sufficient synchronization at its
// write sites — confirm against the setters.
inline bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress != 0;
}
344
345 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
346 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
347 HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
348 bool alloc_after_mark_start = addr >= top_at_mark_start;
349 return alloc_after_mark_start;
350 }
351
352 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
353 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
354 HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
355 bool alloc_after_mark_start = addr >= top_at_mark_start;
356 return alloc_after_mark_start;
357 }
358
// Visits all marked objects in the region, scanning up to the region's
// current top.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
363
// Like marked_object_iterate(), but bounded by the region's
// concurrent-iteration safe limit rather than its current top —
// presumably so the scan can run concurrently with allocation; confirm
// against ShenandoahHeapRegion::concurrent_iteration_safe_limit().
template<class T>
inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
}
368
369 template<class T>
370 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
371 assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
372
373 CMBitMap* mark_bit_map = _complete_mark_bit_map;
374 HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());
375
376 size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
377 size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
378 HeapWord* start = region->bottom() + BrooksPointer::word_size();
379
380 HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
381 HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
382
383 intx dist = ShenandoahMarkScanPrefetch;
384 if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either the header, an oop field, or the forwarding pointer. Note that we
    // cannot touch anything in the oop while it is still being prefetched, to
    // give the prefetch enough time to work. This is why we try to scan the
    // bitmap linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we can
    // no longer trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.
393
394 // No variable-length arrays in standard C++, have enough slots to fit
395 // the prefetch distance.
396 static const int SLOT_COUNT = 256;
397 guarantee(dist <= SLOT_COUNT, "adjust slot count");
398 oop slots[SLOT_COUNT];
399
436 addr += size + skip_objsize_delta;
437 if (addr < top_at_mark_start) {
438 addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
439 }
440 }
441 }
442 }
443
// Applies closure cl to a single object known to be marked in the completed
// mark bitmap. All checks are debug-only sanity assertions; in product builds
// this is a plain dispatch to cl->do_object().
template<class T>
inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
#ifdef ASSERT
  assert(!oopDesc::is_null(obj), "sanity");
  assert(obj->is_oop(), "sanity");
  assert(is_in(obj), "sanity");
  // Only the completed bitmap is supported; callers must not pass the
  // in-progress (next) bitmap.
  assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
  assert(is_marked_complete(obj), "object expected to be marked");
#endif
  cl->do_object(obj);
}
455
456 template <class T>
457 class ShenandoahObjectToOopClosure : public ObjectClosure {
458 T* _cl;
459 public:
460 ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
461
462 void do_object(oop obj) {
463 obj->oop_iterate(_cl);
464 }
465 };
466
467 template <class T>
468 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
469 T* _cl;
470 MemRegion _bounds;
471 public:
472 ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
473 _cl(cl), _bounds(bottom, top) {}
474
475 void do_object(oop obj) {
476 obj->oop_iterate(_cl, _bounds);
477 }
478 };
479
480 template<class T>
481 inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) {
482 if (region->is_humongous()) {
483 HeapWord* bottom = region->bottom();
484 if (top > bottom) {
485 // Go to start of humongous region.
486 uint idx = region->region_number();
487 while (! region->is_humongous_start()) {
488 assert(idx > 0, "sanity");
489 idx--;
490 region = _ordered_regions->get(idx);
491 }
492 ShenandoahObjectToOopBoundedClosure<T> objs(cl, bottom, top);
493 marked_object_iterate(region, &objs);
494 }
495 } else {
496 ShenandoahObjectToOopClosure<T> objs(cl);
497 marked_object_iterate(region, &objs, top);
498 }
499 }
500
// Iterates the oop fields of all marked objects in the region, scanning up
// to the region's current top.
template<class T>
inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_oop_iterate(region, cl, region->top());
}
505
// Like marked_object_oop_iterate(), but bounded by the region's
// concurrent-iteration safe limit rather than its current top —
// presumably so the scan can run concurrently with allocation; confirm
// against ShenandoahHeapRegion::concurrent_iteration_safe_limit().
template<class T>
inline void ShenandoahHeap::marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_oop_iterate(region, cl, region->concurrent_iteration_safe_limit());
}
510 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
|