944
945 // Do collection work
946 {
947 HandleMark hm; // Discard invalid handles created during gc
948 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
949 }
950 // Because freeing humongous regions may have added some unclean
951 // regions, it is necessary to tear down again before rebuilding.
952 tear_down_region_lists();
953 rebuild_region_lists();
954
955 _summary_bytes_used = recalculate_used();
956
957 ref_processor()->enqueue_discovered_references();
958
959 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
960
961 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
962 HandleMark hm; // Discard invalid handles created during verification
963 gclog_or_tty->print(" VerifyAfterGC:");
964 Universe::verify(false);
965 }
966 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
967
968 reset_gc_time_stamp();
969 // Since everything potentially moved, we will clear all remembered
970 // sets, and clear all cards. Later we will rebuild remembered
971 // sets. We will also reset the GC time stamps of the regions.
972 PostMCRemSetClearClosure rs_clear(mr_bs());
973 heap_region_iterate(&rs_clear);
974
975 // Resize the heap if necessary.
976 resize_if_necessary_after_full_collection(full ? 0 : word_size);
977
978 if (_cg1r->use_cache()) {
979 _cg1r->clear_and_record_card_counts();
980 _cg1r->clear_hot_cache();
981 }
982
983 // Rebuild remembered sets of all regions.
2118 HeapWord *end = start + word_sz;
2119 HeapWord *cur;
2120 int *val;
2121 for (cur = start; cur < end; cur++) {
2122 val = (int *) cur;
2123 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
2124 }
2125 }
2126 }
2127 };
2128
// Verifies a single heap region: a starts-humongous region must hold
// exactly one object filling it up to top(); any other non-humongous
// region is verified object-by-object and its live-byte count checked
// against the last complete marking.
2129 class VerifyRegionClosure: public HeapRegionClosure {
2130 public:
2131 bool _allow_dirty;
2132 bool _par;  // true when run from the parallel verify task (regions pre-claimed)
2133 VerifyRegionClosure(bool allow_dirty, bool par = false)
2134 : _allow_dirty(allow_dirty), _par(par) {}
2135 bool doHeapRegion(HeapRegion* r) {
2136 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
2137 "Should be unclaimed at verify points.");
2138 if (r->isHumongous()) {
2139 if (r->startsHumongous()) {
2140 // Verify the single H object.
2141 oop(r->bottom())->verify();
2142 size_t word_sz = oop(r->bottom())->size();
2143 guarantee(r->top() == r->bottom() + word_sz,
2144 "Only one object in a humongous region");
2145 }
2146 } else {
2147 VerifyObjsInRegionClosure not_dead_yet_cl(r);
2148 r->verify(_allow_dirty);
2149 r->object_iterate(&not_dead_yet_cl); // fixed mojibake: "¬_" was a mis-decoded "&not_"
2150 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
2151 "More live objects than counted in last complete marking.");
2152 }
2153 return false; // keep iterating over all regions
2154 }
2155 };
2156
2157 class VerifyRootsClosure: public OopsInGenClosure {
2158 private:
2159 G1CollectedHeap* _g1h;
2160 bool _failures;
2161
2162 public:
2163 VerifyRootsClosure() :
2164 _g1h(G1CollectedHeap::heap()), _failures(false) { }
2165
2166 bool failures() { return _failures; }
2178 obj->print_on(gclog_or_tty);
2179 _failures = true;
2180 }
2181 }
2182 }
2183 };
2184
2185 // This is the task used for parallel heap verification.
2186
2187 class G1ParVerifyTask: public AbstractGangTask {
2188 private:
2189 G1CollectedHeap* _g1h;
2190 bool _allow_dirty;
2191
2192 public:
2193 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
2194 AbstractGangTask("Parallel verify task"),
2195 _g1h(g1h), _allow_dirty(allow_dirty) { }
2196
2197 void work(int worker_i) {
// Each worker claims disjoint chunks of regions (marking them with
// ParVerifyClaimValue) and runs the region verifier in parallel mode.
2198 VerifyRegionClosure blk(_allow_dirty, true);
2199 _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
2200 HeapRegion::ParVerifyClaimValue);
2201 }
2202 };
2203
2204 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
2205 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2206 if (!silent) { gclog_or_tty->print("roots "); }
2207 VerifyRootsClosure rootsCl;
2208 process_strong_roots(false,
2209 SharedHeap::SO_AllClasses,
2210 &rootsCl,
2211 &rootsCl);
2212 rem_set()->invalidate(perm_gen()->used_region(), false);
2213 if (!silent) { gclog_or_tty->print("heapRegions "); }
2214 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
2215 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2216 "sanity check");
2217
2696 set_marking_started();
2697 doConcurrentMark();
2698 }
2699
2700 #if SCAN_ONLY_VERBOSE
2701 _young_list->print();
2702 #endif // SCAN_ONLY_VERBOSE
2703
2704 double end_time_sec = os::elapsedTime();
2705 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
2706 g1_policy()->record_pause_time_ms(pause_time_ms);
2707 GCOverheadReporter::recordSTWEnd(end_time_sec);
2708 g1_policy()->record_collection_pause_end(popular_region != NULL,
2709 abandoned);
2710
2711 assert(regions_accounted_for(), "Region leakage.");
2712
2713 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
2714 HandleMark hm; // Discard invalid handles created during verification
2715 gclog_or_tty->print(" VerifyAfterGC:");
2716 Universe::verify(false);
2717 }
2718
2719 if (was_enabled) ref_processor()->enable_discovery();
2720
2721 {
2722 size_t expand_bytes = g1_policy()->expansion_amount();
2723 if (expand_bytes > 0) {
2724 size_t bytes_before = capacity();
2725 expand(expand_bytes);
2726 }
2727 }
2728
2729 if (mark_in_progress()) {
2730 concurrent_mark()->update_g1_committed();
2731 }
2732
2733 #ifdef TRACESPINNING
2734 ParallelTaskTerminator::print_termination_counts();
2735 #endif
2827 }
2828
2829 #ifdef G1_DEBUG
// Debug-only closure: reports any region still flagged as a
// gc-alloc region (none should remain once the list is forgotten).
2830 class FindGCAllocRegion: public HeapRegionClosure {
2831 public:
2832 bool doHeapRegion(HeapRegion* r) {
2833 if (r->is_gc_alloc_region()) {
2834 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
2835 r->hrs_index(), r->bottom());
2836 }
2837 return false; // false => continue over every region
2838 }
2839 };
2840 #endif // G1_DEBUG
2841
// Empties _gc_alloc_region_list: each region loses its gc-alloc flag,
// non-empty survivors are re-filed with the young list, empty survivors
// are made not-young, and every empty region is counted free again.
// Must run on the VM thread.
2842 void G1CollectedHeap::forget_alloc_region_list() {
2843 assert(Thread::current()->is_VM_thread(), "Precondition");
2844 while (_gc_alloc_region_list != NULL) {
2845 HeapRegion* r = _gc_alloc_region_list;
2846 assert(r->is_gc_alloc_region(), "Invariant.");
// Unlink the head of the list and clear its gc-alloc state.
2847 _gc_alloc_region_list = r->next_gc_alloc_region();
2848 r->set_next_gc_alloc_region(NULL);
2849 r->set_is_gc_alloc_region(false);
2850 if (r->is_survivor()) {
2851 if (r->is_empty()) {
2852 r->set_not_young();
2853 } else {
2854 _young_list->add_survivor_region(r);
2855 }
2856 }
2857 if (r->is_empty()) {
2858 ++_free_regions;
2859 }
2860 }
// Debug builds: verify no region is still flagged as gc-alloc.
2861 #ifdef G1_DEBUG
2862 FindGCAllocRegion fa;
2863 heap_region_iterate(&fa);
2864 #endif // G1_DEBUG
2865 }
2866
3721 double _strong_roots_time;
3722 double _start_term;
3723 double _term_time;
3724
3725 // Map from young-age-index (0 == not young, 1 is youngest) to
3726 // surviving words. base is what we get back from the malloc call
3727 size_t* _surviving_young_words_base;
3728 // this points into the array, as we use the first few entries for padding
3729 size_t* _surviving_young_words;
3730
3731 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
3732
3733 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } // accumulate allocation-buffer waste
3734
3735 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } // accumulate waste from undone allocations
3736
3737 DirtyCardQueue& dirty_card_queue() { return _dcq; } // queue used for deferred RSet updates
3738 CardTableModRefBS* ctbs() { return _ct_bs; } // card-table barrier set for card indexing
3739
// Apply the remembered-set update for the reference at p (in region
// `from`) immediately, via the G1 remembered set's parallel write path.
3740 void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
3741 _g1_rem->par_write_ref(from, p, tid);
3742 }
3743
// Defer the remembered-set update for the reference at p: mark its
// card and enqueue it on the dirty-card queue for later processing,
// instead of updating the RSet immediately.
3744 void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
3745 // If the new value of the field points to the same region or
3746 // is the to-space, we don't need to include it in the Rset updates.
3747 if (!from->is_in_reserved(*p) && !from->is_survivor()) {
3748 size_t card_index = ctbs()->index_for(p);
3749 // If the card hasn't been added to the buffer, do it.
3750 if (ctbs()->mark_card_deferred(card_index)) {
3751 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
3752 }
3753 }
3754 }
3755
3756 public:
3757 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3758 : _g1h(g1h),
3759 _refs(g1h->task_queue(queue_num)),
3760 _dcq(&g1h->dirty_card_queue_set()),
3761 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3762 _g1_rem(g1h->g1_rem_set()),
|
944
945 // Do collection work
946 {
947 HandleMark hm; // Discard invalid handles created during gc
948 G1MarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
949 }
950 // Because freeing humongous regions may have added some unclean
951 // regions, it is necessary to tear down again before rebuilding.
952 tear_down_region_lists();
953 rebuild_region_lists();
954
955 _summary_bytes_used = recalculate_used();
956
957 ref_processor()->enqueue_discovered_references();
958
959 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
960
961 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
962 HandleMark hm; // Discard invalid handles created during verification
963 gclog_or_tty->print(" VerifyAfterGC:");
964 prepare_for_verify();
965 Universe::verify(false);
966 }
967 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
968
969 reset_gc_time_stamp();
970 // Since everything potentially moved, we will clear all remembered
971 // sets, and clear all cards. Later we will rebuild remembered
972 // sets. We will also reset the GC time stamps of the regions.
973 PostMCRemSetClearClosure rs_clear(mr_bs());
974 heap_region_iterate(&rs_clear);
975
976 // Resize the heap if necessary.
977 resize_if_necessary_after_full_collection(full ? 0 : word_size);
978
979 if (_cg1r->use_cache()) {
980 _cg1r->clear_and_record_card_counts();
981 _cg1r->clear_hot_cache();
982 }
983
984 // Rebuild remembered sets of all regions.
2119 HeapWord *end = start + word_sz;
2120 HeapWord *cur;
2121 int *val;
2122 for (cur = start; cur < end; cur++) {
2123 val = (int *) cur;
2124 gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
2125 }
2126 }
2127 }
2128 };
2129
// Verifies a single heap region.  Continues-humongous regions are
// skipped (their single object is verified via the starts-humongous
// region); every other region is verified object-by-object and its
// live-byte count checked against the last complete marking.
2130 class VerifyRegionClosure: public HeapRegionClosure {
2131 public:
2132 bool _allow_dirty;
2133 bool _par;  // true when run from the parallel verify task (regions pre-claimed)
2134 VerifyRegionClosure(bool allow_dirty, bool par = false)
2135 : _allow_dirty(allow_dirty), _par(par) {}
2136 bool doHeapRegion(HeapRegion* r) {
2137 guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
2138 "Should be unclaimed at verify points.");
2139 if (!r->continuesHumongous()) {
2140 VerifyObjsInRegionClosure not_dead_yet_cl(r);
2141 r->verify(_allow_dirty);
2142 r->object_iterate(&not_dead_yet_cl); // fixed mojibake: "¬_" was a mis-decoded "&not_"
2143 guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
2144 "More live objects than counted in last complete marking.");
2145 }
2146 return false; // keep iterating over all regions
2147 }
2148 };
2149
2150 class VerifyRootsClosure: public OopsInGenClosure {
2151 private:
2152 G1CollectedHeap* _g1h;
2153 bool _failures;
2154
2155 public:
2156 VerifyRootsClosure() :
2157 _g1h(G1CollectedHeap::heap()), _failures(false) { }
2158
2159 bool failures() { return _failures; }
2171 obj->print_on(gclog_or_tty);
2172 _failures = true;
2173 }
2174 }
2175 }
2176 };
2177
2178 // This is the task used for parallel heap verification.
2179
2180 class G1ParVerifyTask: public AbstractGangTask {
2181 private:
2182 G1CollectedHeap* _g1h;
2183 bool _allow_dirty;
2184
2185 public:
2186 G1ParVerifyTask(G1CollectedHeap* g1h, bool allow_dirty) :
2187 AbstractGangTask("Parallel verify task"),
2188 _g1h(g1h), _allow_dirty(allow_dirty) { }
2189
2190 void work(int worker_i) {
2191 HandleMark hm; // discard handles created while verifying this chunk
// Each worker claims disjoint chunks of regions (marking them with
// ParVerifyClaimValue) and runs the region verifier in parallel mode.
2192 VerifyRegionClosure blk(_allow_dirty, true);
2193 _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
2194 HeapRegion::ParVerifyClaimValue);
2195 }
2196 };
2197
2198 void G1CollectedHeap::verify(bool allow_dirty, bool silent) {
2199 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2200 if (!silent) { gclog_or_tty->print("roots "); }
2201 VerifyRootsClosure rootsCl;
2202 process_strong_roots(false,
2203 SharedHeap::SO_AllClasses,
2204 &rootsCl,
2205 &rootsCl);
2206 rem_set()->invalidate(perm_gen()->used_region(), false);
2207 if (!silent) { gclog_or_tty->print("heapRegions "); }
2208 if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
2209 assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
2210 "sanity check");
2211
2690 set_marking_started();
2691 doConcurrentMark();
2692 }
2693
2694 #if SCAN_ONLY_VERBOSE
2695 _young_list->print();
2696 #endif // SCAN_ONLY_VERBOSE
2697
2698 double end_time_sec = os::elapsedTime();
2699 double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
2700 g1_policy()->record_pause_time_ms(pause_time_ms);
2701 GCOverheadReporter::recordSTWEnd(end_time_sec);
2702 g1_policy()->record_collection_pause_end(popular_region != NULL,
2703 abandoned);
2704
2705 assert(regions_accounted_for(), "Region leakage.");
2706
2707 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
2708 HandleMark hm; // Discard invalid handles created during verification
2709 gclog_or_tty->print(" VerifyAfterGC:");
2710 prepare_for_verify();
2711 Universe::verify(false);
2712 }
2713
2714 if (was_enabled) ref_processor()->enable_discovery();
2715
2716 {
2717 size_t expand_bytes = g1_policy()->expansion_amount();
2718 if (expand_bytes > 0) {
2719 size_t bytes_before = capacity();
2720 expand(expand_bytes);
2721 }
2722 }
2723
2724 if (mark_in_progress()) {
2725 concurrent_mark()->update_g1_committed();
2726 }
2727
2728 #ifdef TRACESPINNING
2729 ParallelTaskTerminator::print_termination_counts();
2730 #endif
2822 }
2823
2824 #ifdef G1_DEBUG
// Debug-only closure: reports any region still flagged as a
// gc-alloc region (none should remain once the list is forgotten).
2825 class FindGCAllocRegion: public HeapRegionClosure {
2826 public:
2827 bool doHeapRegion(HeapRegion* r) {
2828 if (r->is_gc_alloc_region()) {
2829 gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
2830 r->hrs_index(), r->bottom());
2831 }
2832 return false; // false => continue over every region
2833 }
2834 };
2835 #endif // G1_DEBUG
2836
// Empties _gc_alloc_region_list: each region loses its gc-alloc flag,
// non-empty survivors are re-filed with the young list, empty survivors
// are made not-young, and every empty region is counted free again.
// Must run on the VM thread.
2837 void G1CollectedHeap::forget_alloc_region_list() {
2838 assert(Thread::current()->is_VM_thread(), "Precondition");
2839 while (_gc_alloc_region_list != NULL) {
2840 HeapRegion* r = _gc_alloc_region_list;
2841 assert(r->is_gc_alloc_region(), "Invariant.");
2842 // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
2843 // newly allocated data in order to be able to apply deferred updates
2844 // before the GC is done for verification purposes (i.e to allow
2845 // G1HRRSFlushLogBuffersOnVerify). It's a safe thing to do after the
2846 // collection.
2847 r->ContiguousSpace::set_saved_mark();
// Unlink the head of the list and clear its gc-alloc state.
2848 _gc_alloc_region_list = r->next_gc_alloc_region();
2849 r->set_next_gc_alloc_region(NULL);
2850 r->set_is_gc_alloc_region(false);
2851 if (r->is_survivor()) {
2852 if (r->is_empty()) {
2853 r->set_not_young();
2854 } else {
2855 _young_list->add_survivor_region(r);
2856 }
2857 }
2858 if (r->is_empty()) {
2859 ++_free_regions;
2860 }
2861 }
// Debug builds: verify no region is still flagged as gc-alloc.
2862 #ifdef G1_DEBUG
2863 FindGCAllocRegion fa;
2864 heap_region_iterate(&fa);
2865 #endif // G1_DEBUG
2866 }
2867
3722 double _strong_roots_time;
3723 double _start_term;
3724 double _term_time;
3725
3726 // Map from young-age-index (0 == not young, 1 is youngest) to
3727 // surviving words. base is what we get back from the malloc call
3728 size_t* _surviving_young_words_base;
3729 // this points into the array, as we use the first few entries for padding
3730 size_t* _surviving_young_words;
3731
3732 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
3733
3734 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } // accumulate allocation-buffer waste
3735
3736 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } // accumulate waste from undone allocations
3737
3738 DirtyCardQueue& dirty_card_queue() { return _dcq; } // queue used for deferred RSet updates
3739 CardTableModRefBS* ctbs() { return _ct_bs; } // card-table barrier set for card indexing
3740
// Apply the remembered-set update for the reference at p (in region
// `from`) at once via the G1 remembered set's parallel write path.
// Survivor regions are skipped — mirrors the is_survivor() test in
// deferred_rs_update below; presumably survivor RSets need no update
// here (TODO(review): confirm against G1RemSet).
3741 void immediate_rs_update(HeapRegion* from, oop* p, int tid) {
3742 if (!from->is_survivor()) {
3743 _g1_rem->par_write_ref(from, p, tid);
3744 }
3745 }
3746
// Defer the remembered-set update for the reference at p: mark its
// card and enqueue it on the dirty-card queue for later processing,
// instead of updating the RSet immediately.
3747 void deferred_rs_update(HeapRegion* from, oop* p, int tid) {
3748 // If the new value of the field points to the same region or
3749 // is the to-space, we don't need to include it in the Rset updates.
3750 if (!from->is_in_reserved(*p) && !from->is_survivor()) {
3751 size_t card_index = ctbs()->index_for(p);
3752 // If the card hasn't been added to the buffer, do it.
3753 if (ctbs()->mark_card_deferred(card_index)) {
3754 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
3755 }
3756 }
3757 }
3758
3759 public:
3760 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
3761 : _g1h(g1h),
3762 _refs(g1h->task_queue(queue_num)),
3763 _dcq(&g1h->dirty_card_queue_set()),
3764 _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
3765 _g1_rem(g1h->g1_rem_set()),
|