46 #include "runtime/thread.hpp"
47 #include "utilities/copy.hpp"
48
49 template <class T>
50 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
51 T o = oopDesc::load_heap_oop(p);
52 if (! oopDesc::is_null(o)) {
53 oop obj = oopDesc::decode_heap_oop_not_null(o);
54 _heap->update_oop_ref_not_null(p, obj);
55 }
56 }
57
58 void ShenandoahUpdateRefsClosure::do_oop(oop* p) { do_oop_work(p); }
59 void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
60
61 /*
62 * Marks the object. Returns true if the object has not been marked before and has
63 * been marked by this thread. Returns false if the object has already been marked,
64 * or if a competing thread succeeded in marking this object.
65 */
66 inline bool ShenandoahHeap::mark_next(oop obj) const {
67 #ifdef ASSERT
68 if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
69 tty->print_cr("heap region containing obj:");
70 ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
71 obj_region->print();
72 tty->print_cr("heap region containing forwardee:");
73 ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
74 forward_region->print();
75 }
76 #endif
77
78 assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
79 return mark_next_no_checks(obj);
80 }
81
82 inline bool ShenandoahHeap::mark_next_no_checks(oop obj) const {
83 HeapWord* addr = (HeapWord*) obj;
84 return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
85 }
86
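// An object is treated as marked for the next cycle if either its bit is set in the
// next marking bitmap, or it was allocated at or above the region's top-at-mark-start
// (TAMS); such objects are implicitly live and never get an explicit bitmap bit.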
87 inline bool ShenandoahHeap::is_marked_next(oop obj) const {
88 HeapWord* addr = (HeapWord*) obj;
89 return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
90 }
91
92 inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
93 HeapWord* addr = (HeapWord*) obj;
94 return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
95 }
96
97 inline bool ShenandoahHeap::need_update_refs() const {
98 return _need_update_refs;
99 }
100
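// The region index falls out of the address arithmetic: offset from the heap base,
// shifted down by the power-of-two region size.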
101 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
102 uintptr_t region_start = ((uintptr_t) addr);
103 uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
104 #ifdef ASSERT
105 if (index >= num_regions()) {
106 tty->print_cr("heap region does not contain address, heap base: " PTR_FORMAT
107 ", real bottom of first region: " PTR_FORMAT ", num_regions: " SIZE_FORMAT ", region_size: " SIZE_FORMAT,
108 p2i(base()),
109 p2i(_ordered_regions->get(0)->bottom()),
110 num_regions(),
111 ShenandoahHeapRegion::region_size_bytes());
112 }
113 #endif
114 assert(index < num_regions(), "heap region index must be in range");
380 // not get updated for this stale copy during this cycle, and we will crash while scanning
381 // it the next cycle.
382 //
383 // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
384 // object will overwrite this stale copy, or the filler object on LAB retirement will
385 // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
386 // have to explicitly overwrite the copy with the filler object. With that overwrite,
387 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
388 if (alloc_from_gclab) {
389 thread->gclab().rollback(size_with_fwdptr);
390 } else {
391 fill_with_object(copy, size_no_fwdptr);
392 }
393 log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
394 p2i(p), p2i(copy), p2i(result));
395 return result;
396 }
397 }
398
399 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
400 return ! is_marked_next(oop(entry));
401 }
402
403 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
404 assert(collection_set() != NULL, "Sanity");
405 return collection_set()->is_in(region_index);
406 }
407
408 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
409 return region_in_collection_set(r->region_number());
410 }
411
412 template <class T>
413 inline bool ShenandoahHeap::in_collection_set(T p) const {
414 HeapWord* obj = (HeapWord*) p;
415 assert(collection_set() != NULL, "Sanity");
416 assert(is_in(obj), "should be in heap");
417
418 return collection_set()->is_in(obj);
419 }
420
421 inline bool ShenandoahHeap::concurrent_mark_in_progress() const {
422 return _concurrent_mark_in_progress != 0;
423 }
424
425 inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
426 return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
427 }
428
429 inline address ShenandoahHeap::update_refs_in_progress_addr() {
430 return (address) &(ShenandoahHeap::heap()->_update_refs_in_progress);
431 }
432
433 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
434 return _evacuation_in_progress != 0;
435 }
436
437 inline address ShenandoahHeap::evacuation_in_progress_addr() {
438 return (address) &(ShenandoahHeap::heap()->_evacuation_in_progress);
439 }
440
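// Top-at-mark-start (TAMS) is tracked per region; the lookup derives the region index
// from the address, and anything allocated at or above TAMS postdates the mark start.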
441 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
442 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
443 HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
444 bool alloc_after_mark_start = addr >= top_at_mark_start;
445 return alloc_after_mark_start;
446 }
447
448 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
449 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
450 HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
451 bool alloc_after_mark_start = addr >= top_at_mark_start;
452 return alloc_after_mark_start;
453 }
454
455 template<class T>
456 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
457 marked_object_iterate(region, cl, region->top());
458 }
459
460 template<class T>
461 inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
462 marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
463 }
464
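// Walks the marked objects of a region up to 'limit'. Below TAMS the marking bitmap
// drives the walk; at or above TAMS every object is implicitly live, so the walk
// switches to a linear traversal using object sizes. When ShenandoahMarkScanPrefetch
// is enabled, bitmap hits are batched so object data can be prefetched ahead of use.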
465 template<class T>
466 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
467 assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
468
469 assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
470
471 MarkBitMap* mark_bit_map = _complete_mark_bit_map;
472 HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());
473
474 size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
475 size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
476 HeapWord* start = region->bottom() + BrooksPointer::word_size();
477
478 HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), region->end());
479 HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
480
481 intx dist = ShenandoahMarkScanPrefetch;
482 if (dist > 0) {
483 // Batched scan that prefetches the oop data, anticipating the access to
484 // either the header, an oop field, or the forwarding pointer. Note that we cannot
485 // touch anything in the oop while it is still being prefetched, to give the
486 // prefetch enough time to work. This is why we try to scan the bitmap linearly,
487 // disregarding the object size. However, since we know the forwarding pointer
488 // precedes the object, we can skip over it. Once we cannot trust the bitmap,
489 // there is no point in prefetching the oop contents, as oop->size() will
490 // touch it prematurely.
491
492 // No variable-length arrays in standard C++, so reserve enough slots to fit
493 // the prefetch distance.
494 static const int SLOT_COUNT = 256;
495 guarantee(dist <= SLOT_COUNT, "adjust slot count");
496 oop slots[SLOT_COUNT];
497
498 bool aborting = false;
499 int avail;
500 do {
501 avail = 0;
502 for (int c = 0; (c < dist) && (addr < limit); c++) {
503 Prefetch::read(addr, BrooksPointer::byte_offset());
504 oop obj = oop(addr);
505 slots[avail++] = obj;
506 if (addr < top_at_mark_start) {
507 addr += skip_bitmap_delta;
508 addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
509 } else {
510 // cannot trust mark bitmap anymore, finish the current stride,
511 // and switch to accurate traversal
512 addr += obj->size() + skip_objsize_delta;
513 aborting = true;
514 }
515 }
516
517 for (int c = 0; c < avail; c++) {
518 do_marked_object(mark_bit_map, cl, slots[c]);
519 }
520 } while (avail > 0 && !aborting);
521
522 // accurate traversal
523 while (addr < limit) {
524 oop obj = oop(addr);
525 int size = obj->size();
526 do_marked_object(mark_bit_map, cl, obj);
527 addr += size + skip_objsize_delta;
528 }
529 } else {
530 while (addr < limit) {
531 oop obj = oop(addr);
532 int size = obj->size();
533 do_marked_object(mark_bit_map, cl, obj);
534 addr += size + skip_objsize_delta;
535 if (addr < top_at_mark_start) {
536 addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
537 }
538 }
539 }
540 }
541
542 template<class T>
543 inline void ShenandoahHeap::do_marked_object(MarkBitMap* bitmap, T* cl, oop obj) {
544 assert(!oopDesc::is_null(obj), "sanity");
545 assert(oopDesc::is_oop(obj), "sanity");
546 assert(is_in(obj), "sanity");
547 assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
548 assert(is_marked_complete(obj), "object expected to be marked");
549 cl->do_object(obj);
550 }
551
552 template <class T>
553 class ShenandoahObjectToOopClosure : public ObjectClosure {
554 T* _cl;
555 public:
556 ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
557
558 void do_object(oop obj) {
559 obj->oop_iterate(_cl);
560 }
561 };
562
563 template <class T>
564 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
565 T* _cl;
566 MemRegion _bounds;
567 public:
568 ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
46 #include "runtime/thread.hpp"
47 #include "utilities/copy.hpp"
48
49 template <class T>
50 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
51 T o = oopDesc::load_heap_oop(p);
52 if (! oopDesc::is_null(o)) {
53 oop obj = oopDesc::decode_heap_oop_not_null(o);
54 _heap->update_oop_ref_not_null(p, obj);
55 }
56 }
57
58 void ShenandoahUpdateRefsClosure::do_oop(oop* p) { do_oop_work(p); }
59 void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
60
61 /*
62 * Marks the object. Returns true if the object has not been marked before and has
63 * been marked by this thread. Returns false if the object has already been marked,
64 * or if a competing thread succeeded in marking this object.
65 */
66 inline bool ShenandoahHeap::mark(oop obj) const {
67 #ifdef ASSERT
68 if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
69 tty->print_cr("heap region containing obj:");
70 ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
71 obj_region->print();
72 tty->print_cr("heap region containing forwardee:");
73 ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
74 forward_region->print();
75 }
76 #endif
77
78 assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
79 return mark_no_checks(obj);
80 }
81
82 inline bool ShenandoahHeap::mark_no_checks(oop obj) const {
83 HeapWord* addr = (HeapWord*) obj;
84 return (! allocated_after_mark_start(addr)) && _mark_bit_map->parMark(addr);
85 }
86
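// With the single marking bitmap, liveness is either implicit (allocated at or above
// TAMS) or explicit (bit set in the bitmap).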
87 inline bool ShenandoahHeap::is_marked(oop obj) const {
88 HeapWord* addr = (HeapWord*) obj;
89 return allocated_after_mark_start(addr) || _mark_bit_map->isMarked(addr);
90 }
91
92 inline bool ShenandoahHeap::need_update_refs() const {
93 return _need_update_refs;
94 }
95
96 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
97 uintptr_t region_start = ((uintptr_t) addr);
98 uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
99 #ifdef ASSERT
100 if (index >= num_regions()) {
101 tty->print_cr("heap region does not contain address, heap base: " PTR_FORMAT
102 ", real bottom of first region: " PTR_FORMAT ", num_regions: " SIZE_FORMAT ", region_size: " SIZE_FORMAT,
103 p2i(base()),
104 p2i(_ordered_regions->get(0)->bottom()),
105 num_regions(),
106 ShenandoahHeapRegion::region_size_bytes());
107 }
108 #endif
109 assert(index < num_regions(), "heap region index must be in range");
375 // not get updated for this stale copy during this cycle, and we will crash while scanning
376 // it the next cycle.
377 //
378 // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next
379 // object will overwrite this stale copy, or the filler object on LAB retirement will
380 // do this. For non-GCLAB allocations, we have no way to retract the allocation, and
381 // have to explicitly overwrite the copy with the filler object. With that overwrite,
382 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
383 if (alloc_from_gclab) {
384 thread->gclab().rollback(size_with_fwdptr);
385 } else {
386 fill_with_object(copy, size_no_fwdptr);
387 }
388 log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
389 p2i(p), p2i(copy), p2i(result));
390 return result;
391 }
392 }
393
394 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
395 return ! is_marked(oop(entry));
396 }
397
398 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
399 assert(collection_set() != NULL, "Sanity");
400 return collection_set()->is_in(region_index);
401 }
402
403 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
404 return region_in_collection_set(r->region_number());
405 }
406
407 template <class T>
408 inline bool ShenandoahHeap::in_collection_set(T p) const {
409 HeapWord* obj = (HeapWord*) p;
410 assert(collection_set() != NULL, "Sanity");
411 assert(is_in(obj), "should be in heap");
412
413 return collection_set()->is_in(obj);
414 }
415
416 inline bool ShenandoahHeap::concurrent_mark_in_progress() const {
417 return _concurrent_mark_in_progress != 0;
418 }
419
420 inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
421 return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
422 }
423
424 inline address ShenandoahHeap::update_refs_in_progress_addr() {
425 return (address) &(ShenandoahHeap::heap()->_update_refs_in_progress);
426 }
427
428 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
429 return _evacuation_in_progress != 0;
430 }
431
432 inline address ShenandoahHeap::evacuation_in_progress_addr() {
433 return (address) &(ShenandoahHeap::heap()->_evacuation_in_progress);
434 }
435
436 inline bool ShenandoahHeap::allocated_after_mark_start(HeapWord* addr) const {
437 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
438 HeapWord* top_at_mark_start = _top_at_mark_starts[index];
439 bool alloc_after_mark_start = addr >= top_at_mark_start;
440 return alloc_after_mark_start;
441 }
442
443 template<class T>
444 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
445 marked_object_iterate(region, cl, region->top());
446 }
447
448 template<class T>
449 inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
450 marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
451 }
452
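// Marked-object walk for the single-bitmap scheme: bitmap-driven below TAMS, linear
// above it; only valid against a complete bitmap, as asserted below.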
453 template<class T>
454 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
455 assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
456
457 assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
458 assert(is_bitmap_valid(), "only try this with complete marking bitmap");
459
460 MarkBitMap* mark_bit_map = _mark_bit_map;
461 HeapWord* tams = top_at_mark_start(region->bottom());
462
463 size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
464 size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
465 HeapWord* start = region->bottom() + BrooksPointer::word_size();
466
467 HeapWord* end = MIN2(tams + BrooksPointer::word_size(), region->end());
468 HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
469
470 intx dist = ShenandoahMarkScanPrefetch;
471 if (dist > 0) {
472 // Batched scan that prefetches the oop data, anticipating the access to
473 // either the header, an oop field, or the forwarding pointer. Note that we cannot
474 // touch anything in the oop while it is still being prefetched, to give the
475 // prefetch enough time to work. This is why we try to scan the bitmap linearly,
476 // disregarding the object size. However, since we know the forwarding pointer
477 // precedes the object, we can skip over it. Once we cannot trust the bitmap,
478 // there is no point in prefetching the oop contents, as oop->size() will
479 // touch it prematurely.
480
481 // No variable-length arrays in standard C++, so reserve enough slots to fit
482 // the prefetch distance.
483 static const int SLOT_COUNT = 256;
484 guarantee(dist <= SLOT_COUNT, "adjust slot count");
485 oop slots[SLOT_COUNT];
486
487 bool aborting = false;
488 int avail;
489 do {
490 avail = 0;
491 for (int c = 0; (c < dist) && (addr < limit); c++) {
492 Prefetch::read(addr, BrooksPointer::byte_offset());
493 oop obj = oop(addr);
494 slots[avail++] = obj;
495 if (addr < tams) {
496 addr += skip_bitmap_delta;
497 addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
498 } else {
499 // cannot trust mark bitmap anymore, finish the current stride,
500 // and switch to accurate traversal
501 addr += obj->size() + skip_objsize_delta;
502 aborting = true;
503 }
504 }
505
506 for (int c = 0; c < avail; c++) {
507 do_marked_object(mark_bit_map, cl, slots[c]);
508 }
509 } while (avail > 0 && !aborting);
510
511 // accurate traversal
512 while (addr < limit) {
513 oop obj = oop(addr);
514 int size = obj->size();
515 do_marked_object(mark_bit_map, cl, obj);
516 addr += size + skip_objsize_delta;
517 }
518 } else {
519 while (addr < limit) {
520 oop obj = oop(addr);
521 int size = obj->size();
522 do_marked_object(mark_bit_map, cl, obj);
523 addr += size + skip_objsize_delta;
524 if (addr < tams) {
525 addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
526 }
527 }
528 }
529 }
530
531 template<class T>
532 inline void ShenandoahHeap::do_marked_object(MarkBitMap* bitmap, T* cl, oop obj) {
533 assert(!oopDesc::is_null(obj), "sanity");
534 assert(oopDesc::is_oop(obj), "sanity");
535 assert(is_in(obj), "sanity");
536 assert(bitmap == _mark_bit_map, "only iterate completed mark bitmap");
537 assert(is_marked(obj), "object expected to be marked");
538 cl->do_object(obj);
539 }
540
541 template <class T>
542 class ShenandoahObjectToOopClosure : public ObjectClosure {
543 T* _cl;
544 public:
545 ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}
546
547 void do_object(oop obj) {
548 obj->oop_iterate(_cl);
549 }
550 };
551
552 template <class T>
553 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
554 T* _cl;
555 MemRegion _bounds;
556 public:
557 ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :