  // values in the heap have been properly initialized.
  _g1mm = new G1MonitoringSupport(this);

  G1StringDedup::initialize();

  return JNI_OK;
}

void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. gclog_or_tty)
  // that are destroyed during shutdown.
  _cg1r->stop();
  _cmThread->stop();
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::stop();
  }
}

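// The _humongous_is_live table has one entry per heap region and records
// which candidate humongous regions have been found live during the
// current evacuation pause. It is reset whenever a new set of eager
// reclaim candidates is registered (see
// register_humongous_regions_with_in_cset_fast_test()).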
void G1CollectedHeap::clear_humongous_is_live_table() {
  guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
  _humongous_is_live.clear();
}

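// Heap regions are the unit of heap reservation, so the largest possible
// region size is a conservative upper bound on the alignment the heap can
// require before the actual region size has been ergonomically chosen.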
size_t G1CollectedHeap::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}

void G1CollectedHeap::ref_processing_init() {
  // Reference processing in G1 currently works as follows:
  //
  // * There are two reference processor instances. One is
  //   used to record and process discovered references
  //   during concurrent marking; the other is used to
  //   record and process references during STW pauses
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
  //   * Reference discovery is enabled at initial marking.
// ...
}
#endif // PRODUCT

G1CollectedHeap* G1CollectedHeap::heap() {
  assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
         "not a garbage-first heap");
  return _g1h;
}

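// Called at the start of every GC pause (G1 ignores the 'full' argument):
// retire TLABs so that the heap is parsable before any pause work runs,
// and optionally print a periodic remembered set summary.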
void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  // always_do_update_barrier = false;
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  // Fill TLABs and such
  accumulate_statistics_all_tlabs();
  ensure_parsability(true);

  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
  }
}

void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {

  if (G1SummarizeRSetStats &&
      (G1SummarizeRSetStatsPeriod > 0) &&
      // We are at the end of the GC. The total collections counter has
      // already been incremented.
      ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
  }

  // FIXME: what is this about?
  // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  // is set.
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                           "derived pointer present"));
  // always_do_update_barrier = true;

  resize_all_tlabs();

// ...
  JavaThread *curr = Threads::first();
  while (curr != NULL) {
    DirtyCardQueue& dcq = curr->dirty_card_queue();
    extra_cards += dcq.size();
    curr = curr->next();
  }
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  size_t buffer_size = dcqs.buffer_size();
  size_t buffer_num = dcqs.completed_buffers_num();

  // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
  // in bytes - not the number of 'entries'. We need to convert
  // into a number of cards.
  return (buffer_size * buffer_num + extra_cards) / oopSize;
}


size_t G1CollectedHeap::cards_scanned() {
  return g1_rem_set()->cardsScanned();
}

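// A humongous region cannot be a candidate for eager reclaim if the object
// it contains is an object array (eager reclaim does not support those, see
// G1FreeHumongousRegionClosure) or if its remembered set is non-empty,
// since then some other region may still reference it.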
bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
  HeapRegion* region = region_at(index);
  assert(region->startsHumongous(), "Must start a humongous object");
  return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
}

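// Walks all regions and registers every starts-humongous region that is an
// eager-reclaim candidate with the in-CSet fast test, so that the
// evacuation copy closures can cheaply detect references into such regions.
// Also counts totals for the pause phase times.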
class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
 private:
  size_t _total_humongous;
  size_t _candidate_humongous;
 public:
  RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    if (!r->startsHumongous()) {
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    uint region_idx = r->hrs_index();
    bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
    // is_candidate already filters out humongous regions with a non-empty
    // remembered set. This cannot lead to a humongous object being
    // mistakenly kept alive, because during a young collection remembered
    // sets are only added to.
    if (is_candidate) {
      g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
      _candidate_humongous++;
    }
    _total_humongous++;

    return false;
  }

  size_t total_humongous() const { return _total_humongous; }
  size_t candidate_humongous() const { return _candidate_humongous; }
};

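// Even when eager reclaim is disabled we record (zeroed) humongous stats
// below, so that the per-pause phase times output remains complete.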
void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
  if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
    g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);
    return;
  }

  RegisterHumongousWithInCSetFastTestClosure cl;
  heap_region_iterate(&cl);
  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
                                                                  cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  if (_has_humongous_reclaim_candidates) {
    clear_humongous_is_live_table();
  }
}

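// Allocates and zeroes one counter per region in the young collection set;
// during evacuation these accumulate the number of words that survive out
// of each young region.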
void
G1CollectedHeap::setup_surviving_young_words() {
  assert(_surviving_young_words == NULL, "pre-condition");
  uint array_length = g1_policy()->young_cset_region_length();
  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  if (_surviving_young_words == NULL) {
    vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
                          "Not enough space for young surv words summary.");
  }
  memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
#ifdef ASSERT
  for (uint i = 0; i < array_length; ++i) {
    assert( _surviving_young_words[i] == 0, "memset above" );
  }
#endif // ASSERT
}

void
// ...
  }
  g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);

#if YOUNG_LIST_VERBOSE
  gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
  _young_list->print();
#endif // YOUNG_LIST_VERBOSE

  if (g1_policy()->during_initial_mark_pause()) {
    concurrent_mark()->checkpointRootsInitialPre();
  }

#if YOUNG_LIST_VERBOSE
  gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
  _young_list->print();
  g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

  g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);

  register_humongous_regions_with_in_cset_fast_test();

  _cm->note_start_of_gc();
  // We should not verify the per-thread SATB buffers given that
  // we have not filtered them yet (we'll do so during the
  // GC). We also call this after finalize_cset() to
  // ensure that the CSet has been finalized.
  _cm->verify_no_cset_oops(true /* verify_stacks */,
                           true /* verify_enqueued_buffers */,
                           false /* verify_thread_buffers */,
                           true /* verify_fingers */);

  if (_hr_printer.is_active()) {
    HeapRegion* hr = g1_policy()->collection_set();
    while (hr != NULL) {
      G1HRPrinter::RegionType type;
      if (!hr->is_young()) {
        type = G1HRPrinter::Old;
      } else if (hr->is_survivor()) {
        type = G1HRPrinter::Survivor;
      } else {
// ...

  setup_surviving_young_words();

  // Initialize the GC alloc regions.
  init_gc_alloc_regions(evacuation_info);

  // Actually do the work...
  evacuate_collection_set(evacuation_info);

  // We do this mainly to verify the per-thread SATB buffers
  // (which have been filtered by now) since we didn't verify
  // them earlier. No point in re-checking the stacks / enqueued
  // buffers given that the CSet has not changed since last time
  // we checked.
  _cm->verify_no_cset_oops(false /* verify_stacks */,
                           false /* verify_enqueued_buffers */,
                           true /* verify_thread_buffers */,
                           true /* verify_fingers */);

  free_collection_set(g1_policy()->collection_set(), evacuation_info);

  eagerly_reclaim_humongous_regions();

  g1_policy()->clear_collection_set();

  cleanup_surviving_young_words();

  // Start a new incremental collection set for the next pause.
  g1_policy()->start_incremental_cset_building();

  clear_cset_fast_test();

  _young_list->reset_sampled_info();

  // Don't check the whole heap at this point as the
  // GC alloc regions from this pause have been tagged
  // as survivors and moved on to the survivor list.
  // Survivor regions will fail the !is_young() check.
  assert(check_young_list_empty(false /* check_heap */),
         "young list should be empty");

#if YOUNG_LIST_VERBOSE
  gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
// ...

template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
    _scanned_klass->record_modified_oops();
  }
}

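// The main copy closure used during evacuation. A reference into the
// collection set is updated to the referent's forwardee, evacuating the
// referent first if needed. A reference into a candidate humongous region
// merely marks that region as live. Anything else is left in place and,
// for root scanning during an initial-mark pause, marked.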
template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (oopDesc::is_null(heap_oop)) {
    return;
  }

  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->queue_num(), "sanity");

  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);

  if (state == G1CollectedHeap::InCSet) {
    oop forwardee;
    if (obj->is_forwarded()) {
      forwardee = obj->forwardee();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(obj);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    oopDesc::encode_store_heap_oop(p, forwardee);
    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it; the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierKlass) {
      do_klass_barrier(p, forwardee);
    }
  } else {
    if (state == G1CollectedHeap::IsHumongous) {
      _g1->set_humongous_is_live(obj);
    }
    // The object is not in the collection set. If we're a root scanning
    // closure during an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }

  if (barrier == G1BarrierEvac) {
    _par_scan_state->update_rs(_from, p, _worker_id);
  }
}

template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);

class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
  G1CollectedHeap*      _g1h;
  G1ParScanThreadState* _par_scan_state;
  RefToScanQueueSet*    _queues;
// ...
    }
    return false;
  }
};

bool G1STWIsAliveClosure::do_object_b(oop p) {
  // An object is reachable if it is outside the collection set,
  // or is inside and copied.
  return !_g1->obj_in_cs(p) || p->is_forwarded();
}

// Non Copying Keep Alive closure
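// Used during STW reference processing when referents do not need to be
// copied: a reference into the collection set is updated to the forwardee
// installed by evacuation, while a reference into a candidate humongous
// region only flags that region as live.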
class G1KeepAliveClosure: public OopClosure {
  G1CollectedHeap* _g1;
 public:
  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop(oop* p) {
    oop obj = *p;

    G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
    if (obj == NULL || cset_state == G1CollectedHeap::InNeither) {
      return;
    }
    if (cset_state == G1CollectedHeap::InCSet) {
      assert( obj->is_forwarded(), "invariant" );
      *p = obj->forwardee();
    } else {
      assert(!obj->is_forwarded(), "invariant" );
      assert(cset_state == G1CollectedHeap::IsHumongous,
             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
      _g1->set_humongous_is_live(obj);
    }
  }
};

// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.

class G1CopyingKeepAliveClosure: public OopClosure {
  G1CollectedHeap*      _g1h;
  OopClosure*           _copy_non_heap_obj_cl;
  G1ParScanThreadState* _par_scan_state;

 public:
  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                            OopClosure* non_heap_obj_cl,
                            G1ParScanThreadState* pss):
    _g1h(g1h),
// ...
    // (i.e. it has "escaped" to an old object) this remembered set entry will stay
    // until the end of a concurrent mark.
    //
    // It is not required to check whether the object has been found dead by marking
    // or not, in fact it would prevent reclamation within a concurrent cycle, as
    // all objects allocated during that time are considered live.
    // SATB marking is even more conservative than the remembered set.
    // So if at this point in the collection there is no remembered set entry,
    // nobody has a reference to it.
    // At the start of collection we flush all refinement logs, and remembered sets
    // are completely up to date with respect to references to the humongous object.
    //
    // Other implementation considerations:
    // - never consider object arrays: while they are a valid target, they have not
    //   been observed to be used as temporary objects.
    // - they would also require considerable effort for cleaning up the remembered
    //   sets.
    // While this cleanup is not strictly necessary to be done (or done instantly),
    // given that their occurrence is very low, this saves us this additional
    // complexity.
    uint region_idx = r->hrs_index();
    if (g1h->humongous_is_live(region_idx) ||
        g1h->humongous_region_is_always_live(region_idx)) {

      if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
        gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
                               r->isHumongous(),
                               region_idx,
                               r->rem_set()->occupied(),
                               r->rem_set()->strong_code_roots_list_length(),
                               g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
                               g1h->humongous_is_live(region_idx),
                               oop(r->bottom())->is_objArray()
                              );
      }

      return false;
    }

    guarantee(!((oop)(r->bottom()))->is_objArray(),
              err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
                      r->bottom()));

    if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
      gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
                             r->isHumongous(),
                             r->bottom(),
                             region_idx,
                             r->region_num(),
                             r->rem_set()->occupied(),
                             r->rem_set()->strong_code_roots_list_length(),
                             g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
                             g1h->humongous_is_live(region_idx),
                             oop(r->bottom())->is_objArray()
                            );
    }
    _freed_bytes += r->used();
    r->set_containing_set(NULL);
    _humongous_regions_removed.increment(1u, r->capacity());
    g1h->free_humongous_region(r, _free_region_list, false);

    return false;
  }

  HeapRegionSetCount& humongous_free_count() {
    return _humongous_regions_removed;
  }

  size_t bytes_freed() const {
    return _freed_bytes;
  }

  size_t humongous_reclaimed() const {
    return _humongous_regions_removed.length();
  }
};

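// Frees every candidate humongous region that has not been found live
// during this pause and returns it to the free list. Does nothing (apart
// from recording zeroed phase times) if the feature is disabled or no
// candidates were registered.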
void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
  assert_at_safepoint(true);

  if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) {
    g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
    return;
  }

  double start_time = os::elapsedTime();

  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");

  G1FreeHumongousRegionClosure cl(&local_cleanup_list);
  heap_region_iterate(&cl);

  HeapRegionSetCount empty_set;
  remove_from_old_sets(empty_set, cl.humongous_free_count());

  G1HRPrinter* hr_printer = _g1h->hr_printer();
  if (hr_printer->is_active()) {
    FreeRegionListIterator iter(&local_cleanup_list);
    while (iter.more_available()) {
      HeapRegion* hr = iter.get_next();
      hr_printer->cleanup(hr);
    }
  }
