46 #include "runtime/thread.hpp"
47 #include "utilities/copy.hpp"
48
49 template <class T>
50 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
51 T o = oopDesc::load_heap_oop(p);
52 if (! oopDesc::is_null(o)) {
53 oop obj = oopDesc::decode_heap_oop_not_null(o);
54 _heap->update_oop_ref_not_null(p, obj);
55 }
56 }
57
// Dispatch both compressed and uncompressed oop slots to the templated worker.
void ShenandoahUpdateRefsClosure::do_oop(oop* p) { do_oop_work(p); }
void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
60
/*
 * Marks the object. Returns true if the object has not been marked before and has
 * been marked by this thread. Returns false if the object has already been marked,
 * or if a competing thread succeeded in marking this object.
 */
inline bool ShenandoahHeap::mark_next(oop obj) const {
#ifdef ASSERT
  // Debug aid: if obj is not its own forwardee, dump the regions holding both
  // the object and its forwardee before the assert below fires.
  if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
    tty->print_cr("heap region containing obj:");
    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
    obj_region->print();
    tty->print_cr("heap region containing forwardee:");
    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
    forward_region->print();
  }
#endif

  assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  return mark_next_no_checks(obj);
}
81
82 inline bool ShenandoahHeap::mark_next_no_checks(oop obj) const {
83 HeapWord* addr = (HeapWord*) obj;
84 return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
85 }
86
87 inline bool ShenandoahHeap::is_marked_next(oop obj) const {
88 HeapWord* addr = (HeapWord*) obj;
89 return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
90 }
91
92 inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
93 HeapWord* addr = (HeapWord*) obj;
94 return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
95 }
96
// Whether a reference-update pass is still pending.
inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}
100
101 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
102 uintptr_t region_start = ((uintptr_t) addr);
103 uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
104 #ifdef ASSERT
105 if (index >= num_regions()) {
106 tty->print_cr("heap region does not contain address, heap base: "PTR_FORMAT \
107 ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT,
108 p2i(base()),
109 p2i(_ordered_regions->get(0)->bottom()),
110 num_regions(),
111 ShenandoahHeapRegion::region_size_bytes());
112 }
113 #endif
114 assert(index < num_regions(), "heap region index must be in range");
411 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
412 if (alloc_from_gclab) {
413 thread->gclab().rollback(size_with_fwdptr);
414 } else {
415 fill_with_object(copy, size_no_fwdptr);
416 }
417 log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
418 p2i(p), p2i(copy), p2i(result));
419 return result;
420 }
421 }
422
423 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
424 // TODO: Make this faster! It's used in a hot path.
425 // TODO: it's not strictly matrix-related, but used only in partial (i.e. matrix) GCs.
426 if (is_concurrent_partial_in_progress()) {
427 assert(! in_collection_set((oop) entry), "must not get cset objects here");
428 // assert(free_regions()->contains(heap_region_containing(entry)), "expect to-space object");
429 return true;
430 } else if (concurrent_mark_in_progress()) {
431 return ! is_marked_next(oop(entry));
432 } else {
433 return false;
434 }
435 }
436
// True if the region with the given index is in the current collection set.
bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(region_index);
}
441
// Region overload: delegates to the index-based collection set query.
bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
  return region_in_collection_set(r->region_number());
}
445
446 template <class T>
447 inline bool ShenandoahHeap::in_collection_set(T p) const {
448 HeapWord* obj = (HeapWord*) p;
449 assert(collection_set() != NULL, "Sanity");
450 assert(is_in(obj), "should be in heap");
451
// Whether concurrent marking is currently running (flag is stored as an int).
inline bool ShenandoahHeap::concurrent_mark_in_progress() const {
  return _concurrent_mark_in_progress != 0;
}
458
// Whether a concurrent partial (matrix) GC is currently running.
inline bool ShenandoahHeap::is_concurrent_partial_in_progress() const {
  return _concurrent_partial_in_progress;
}
462
// Raw address of the update-refs-in-progress flag, presumably for direct
// access from generated/assembly code -- confirm against callers.
inline address ShenandoahHeap::update_refs_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_update_refs_in_progress);
}
466
// Whether concurrent evacuation is currently running (flag is stored as an int).
inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _evacuation_in_progress != 0;
}
470
// Raw address of the evacuation-in-progress flag, presumably for direct
// access from generated/assembly code -- confirm against callers.
inline address ShenandoahHeap::evacuation_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_evacuation_in_progress);
}
474
475 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
476 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
477 HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
478 bool alloc_after_mark_start = addr >= top_at_mark_start;
479 return alloc_after_mark_start;
480 }
481
482 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
483 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
484 HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
485 bool alloc_after_mark_start = addr >= top_at_mark_start;
486 return alloc_after_mark_start;
487 }
488
// Iterate all marked objects in the region up to its current top.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
493
// Iterate marked objects only up to the region's concurrent-iteration safe
// limit, for use while mutators may still be allocating in the region.
template<class T>
inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
}
498
// Visit every marked object in the region below limit with cl. Below the
// top-at-mark-start (TAMS) boundary the completed mark bitmap is
// authoritative; at/above TAMS every object is implicitly live, so the walk
// steps object-by-object via obj->size() instead.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");

  assert(! region->is_humongous_continuation(), "no humongous continuation regions here");

  MarkBitMap* mark_bit_map = _complete_mark_bit_map;
  HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());

  // Bitmap stepping advances past the current mark bit (+1) plus a
  // forwarding-pointer word; size-based stepping adds obj->size() separately.
  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();

  HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), region->end());
  HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Not that we cannot
    // touch anything in oop, while it still being prefetched to get enough
    // time for prefetch to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know forwarding pointer
    // preceeds the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point for prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    oop slots[SLOT_COUNT];

    bool aborting = false;
    int avail;
    do {
      // Fill a stride of up to dist prefetched objects before touching any of
      // them, to give the prefetches time to complete.
      avail = 0;
      for (int c = 0; (c < dist) && (addr < limit); c++) {
        Prefetch::read(addr, BrooksPointer::byte_offset());
        oop obj = oop(addr);
        slots[avail++] = obj;
        if (addr < top_at_mark_start) {
          addr += skip_bitmap_delta;
          addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
        } else {
          // cannot trust mark bitmap anymore, finish the current stride,
          // and switch to accurate traversal
          addr += obj->size() + skip_objsize_delta;
          aborting = true;
        }
      }

      for (int c = 0; c < avail; c++) {
        do_marked_object(mark_bit_map, cl, slots[c]);
      }
    } while (avail > 0 && !aborting);

    // accurate traversal
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
    }
  } else {
    // No prefetching: walk via the bitmap below TAMS, by object size above it.
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
      if (addr < top_at_mark_start) {
        addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
      }
    }
  }
}
575
// Sanity-check a marked object and apply the closure to it.
template<class T>
inline void ShenandoahHeap::do_marked_object(MarkBitMap* bitmap, T* cl, oop obj) {
  assert(!oopDesc::is_null(obj), "sanity");
  assert(oopDesc::is_oop(obj), "sanity");
  assert(is_in(obj), "sanity");
  assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
  assert(is_marked_complete(obj), "object expected to be marked");
  cl->do_object(obj);
}
585
// Adapts an oop-visiting closure T into an ObjectClosure by iterating all oop
// fields of each visited object.
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;  // delegate closure applied to each oop field
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};
596
597 template <class T>
598 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
599 T* _cl;
600 MemRegion _bounds;
601 public:
602 ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
|
46 #include "runtime/thread.hpp"
47 #include "utilities/copy.hpp"
48
49 template <class T>
50 void ShenandoahUpdateRefsClosure::do_oop_work(T* p) {
51 T o = oopDesc::load_heap_oop(p);
52 if (! oopDesc::is_null(o)) {
53 oop obj = oopDesc::decode_heap_oop_not_null(o);
54 _heap->update_oop_ref_not_null(p, obj);
55 }
56 }
57
// Dispatch both compressed and uncompressed oop slots to the templated worker.
void ShenandoahUpdateRefsClosure::do_oop(oop* p) { do_oop_work(p); }
void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
60
/*
 * Marks the object. Returns true if the object has not been marked before and has
 * been marked by this thread. Returns false if the object has already been marked,
 * or if a competing thread succeeded in marking this object.
 */
inline bool ShenandoahHeap::mark(oop obj) {
#ifdef ASSERT
  // Debug aid: if obj is not its own forwardee, dump the regions holding both
  // the object and its forwardee before the assert below fires.
  if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
    tty->print_cr("heap region containing obj:");
    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
    obj_region->print();
    tty->print_cr("heap region containing forwardee:");
    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
    forward_region->print();
  }
#endif

  assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  return mark_no_checks(obj);
}
81
82 inline bool ShenandoahHeap::mark_no_checks(oop obj) {
83 HeapWord* addr = (HeapWord*) obj;
84 return !allocated_after_mark_start(addr) && _mark_bit_map.parMark(addr);
85 }
86
87 inline bool ShenandoahHeap::is_marked(oop obj) const {
88 HeapWord* addr = (HeapWord*) obj;
89 return allocated_after_mark_start(addr) || _mark_bit_map.isMarked(addr);
90 }
91
// Whether a reference-update pass is still pending.
inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}
95
96 inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
97 uintptr_t region_start = ((uintptr_t) addr);
98 uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift();
99 #ifdef ASSERT
100 if (index >= num_regions()) {
101 tty->print_cr("heap region does not contain address, heap base: "PTR_FORMAT \
102 ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT,
103 p2i(base()),
104 p2i(_ordered_regions->get(0)->bottom()),
105 num_regions(),
106 ShenandoahHeapRegion::region_size_bytes());
107 }
108 #endif
109 assert(index < num_regions(), "heap region index must be in range");
406 // we have to keep the fwdptr initialized and pointing to our (stale) copy.
407 if (alloc_from_gclab) {
408 thread->gclab().rollback(size_with_fwdptr);
409 } else {
410 fill_with_object(copy, size_no_fwdptr);
411 }
412 log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " failed, use other: " PTR_FORMAT,
413 p2i(p), p2i(copy), p2i(result));
414 return result;
415 }
416 }
417
418 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
419 // TODO: Make this faster! It's used in a hot path.
420 // TODO: it's not strictly matrix-related, but used only in partial (i.e. matrix) GCs.
421 if (is_concurrent_partial_in_progress()) {
422 assert(! in_collection_set((oop) entry), "must not get cset objects here");
423 // assert(free_regions()->contains(heap_region_containing(entry)), "expect to-space object");
424 return true;
425 } else if (concurrent_mark_in_progress()) {
426 return ! is_marked(oop(entry));
427 } else {
428 return false;
429 }
430 }
431
// True if the region with the given index is in the current collection set.
bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
  assert(collection_set() != NULL, "Sanity");
  return collection_set()->is_in(region_index);
}
436
// Region overload: delegates to the index-based collection set query.
bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
  return region_in_collection_set(r->region_number());
}
440
441 template <class T>
442 inline bool ShenandoahHeap::in_collection_set(T p) const {
443 HeapWord* obj = (HeapWord*) p;
444 assert(collection_set() != NULL, "Sanity");
445 assert(is_in(obj), "should be in heap");
446
// Whether concurrent marking is currently running (flag is stored as an int).
inline bool ShenandoahHeap::concurrent_mark_in_progress() const {
  return _concurrent_mark_in_progress != 0;
}
453
// Whether a concurrent partial (matrix) GC is currently running.
inline bool ShenandoahHeap::is_concurrent_partial_in_progress() const {
  return _concurrent_partial_in_progress;
}
457
// Raw address of the update-refs-in-progress flag, presumably for direct
// access from generated/assembly code -- confirm against callers.
inline address ShenandoahHeap::update_refs_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_update_refs_in_progress);
}
461
// Whether concurrent evacuation is currently running (flag is stored as an int).
inline bool ShenandoahHeap::is_evacuation_in_progress() const {
  return _evacuation_in_progress != 0;
}
465
// Raw address of the evacuation-in-progress flag, presumably for direct
// access from generated/assembly code -- confirm against callers.
inline address ShenandoahHeap::evacuation_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_evacuation_in_progress);
}
469
470 inline bool ShenandoahHeap::allocated_after_mark_start(HeapWord* addr) const {
471 uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
472 HeapWord* top_at_mark_start = _top_at_mark_starts[index];
473 bool alloc_after_mark_start = addr >= top_at_mark_start;
474 return alloc_after_mark_start;
475 }
476
// Iterate all marked objects in the region up to its current top.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->top());
}
481
// Iterate marked objects only up to the region's concurrent-iteration safe
// limit, for use while mutators may still be allocating in the region.
template<class T>
inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit());
}
486
487 template<class T>
488 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) {
489 assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
490
491 assert(! region->is_humongous_continuation(), "no humongous continuation regions here");
492 assert(is_bitmap_valid(), "only try this with complete marking bitmap");
493
494 MarkBitMap mark_bit_map = _mark_bit_map;
495 HeapWord* tams = top_at_mark_start(region->bottom());
496
497 size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
498 size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
499 HeapWord* start = region->bottom() + BrooksPointer::word_size();
500
501 HeapWord* end = MIN2(tams + BrooksPointer::word_size(), region->end());
502 HeapWord* addr = mark_bit_map.getNextMarkedWordAddress(start, end);
503
504 intx dist = ShenandoahMarkScanPrefetch;
505 if (dist > 0) {
506 // Batched scan that prefetches the oop data, anticipating the access to
507 // either header, oop field, or forwarding pointer. Not that we cannot
508 // touch anything in oop, while it still being prefetched to get enough
509 // time for prefetch to work. This is why we try to scan the bitmap linearly,
510 // disregarding the object size. However, since we know forwarding pointer
511 // preceeds the object, we can skip over it. Once we cannot trust the bitmap,
512 // there is no point for prefetching the oop contents, as oop->size() will
513 // touch it prematurely.
514
515 // No variable-length arrays in standard C++, have enough slots to fit
516 // the prefetch distance.
517 static const int SLOT_COUNT = 256;
518 guarantee(dist <= SLOT_COUNT, "adjust slot count");
519 oop slots[SLOT_COUNT];
520
521 bool aborting = false;
522 int avail;
523 do {
524 avail = 0;
525 for (int c = 0; (c < dist) && (addr < limit); c++) {
526 Prefetch::read(addr, BrooksPointer::byte_offset());
527 oop obj = oop(addr);
528 slots[avail++] = obj;
529 if (addr < tams) {
530 addr += skip_bitmap_delta;
531 addr = mark_bit_map.getNextMarkedWordAddress(addr, end);
532 } else {
533 // cannot trust mark bitmap anymore, finish the current stride,
534 // and switch to accurate traversal
535 addr += obj->size() + skip_objsize_delta;
536 aborting = true;
537 }
538 }
539
540 for (int c = 0; c < avail; c++) {
541 do_marked_object(cl, slots[c]);
542 }
543 } while (avail > 0 && !aborting);
544
545 // accurate traversal
546 while (addr < limit) {
547 oop obj = oop(addr);
548 int size = obj->size();
549 do_marked_object(cl, obj);
550 addr += size + skip_objsize_delta;
551 }
552 } else {
553 while (addr < limit) {
554 oop obj = oop(addr);
555 int size = obj->size();
556 do_marked_object(cl, obj);
557 addr += size + skip_objsize_delta;
558 if (addr < tams) {
559 addr = mark_bit_map.getNextMarkedWordAddress(addr, end);
560 }
561 }
562 }
563 }
564
// Sanity-check a marked object and apply the closure to it.
template<class T>
inline void ShenandoahHeap::do_marked_object(T* cl, oop obj) {
  assert(!oopDesc::is_null(obj), "sanity");
  assert(oopDesc::is_oop(obj), "sanity");
  assert(is_in(obj), "sanity");
  assert(is_marked(obj), "object expected to be marked");
  cl->do_object(obj);
}
573
// Adapts an oop-visiting closure T into an ObjectClosure by iterating all oop
// fields of each visited object.
template <class T>
class ShenandoahObjectToOopClosure : public ObjectClosure {
  T* _cl;  // delegate closure applied to each oop field
public:
  ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {}

  void do_object(oop obj) {
    obj->oop_iterate(_cl);
  }
};
584
585 template <class T>
586 class ShenandoahObjectToOopBoundedClosure : public ObjectClosure {
587 T* _cl;
588 MemRegion _bounds;
589 public:
590 ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) :
|