1750 _ref_processor_stw(NULL),
1751 _bot(NULL),
1752 _cg1r(NULL),
1753 _g1mm(NULL),
1754 _refine_cte_cl(NULL),
1755 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1756 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1757 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1758 _humongous_reclaim_candidates(),
1759 _has_humongous_reclaim_candidates(false),
1760 _archive_allocator(NULL),
1761 _free_regions_coming(false),
1762 _young_list(new YoungList(this)),
1763 _gc_time_stamp(0),
1764 _summary_bytes_used(0),
1765 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1766 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1767 _expand_heap_after_alloc_failure(true),
1768 _old_marking_cycles_started(0),
1769 _old_marking_cycles_completed(0),
1770 _heap_summary_sent(false),
1771 _in_cset_fast_test(),
1772 _dirty_cards_region_list(NULL),
1773 _worker_cset_start_region(NULL),
1774 _worker_cset_start_region_time_stamp(NULL),
1775 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1776 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1777 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1778 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1779
1780 _workers = new WorkGang("GC Thread", ParallelGCThreads,
1781 /* are_GC_task_threads */true,
1782 /* are_ConcurrentGC_threads */false);
1783 _workers->initialize_workers();
1784 _verifier = new G1HeapVerifier(this);
1785
1786 _allocator = G1Allocator::create_allocator(this);
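  // Objects that do not fit in half a heap region are humongous and are
  // allocated in dedicated humongous regions rather than in the young gen.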
1787 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1788
1789 // Override the default _filler_array_max_size so that no humongous filler
1790 // objects are created.
1791 _filler_array_max_size = _humongous_object_threshold_in_words;
1792
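  // One reference-to-scan task queue per GC worker thread.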
1793 uint n_queues = ParallelGCThreads;
1794 _task_queues = new RefToScanQueueSet(n_queues);
1795
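  // Per-worker collection set start regions, time stamps, and evacuation
  // failure info, all indexed by worker id.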
1796 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1797 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1798 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
2299 "is inconsistent with _old_marking_cycles_completed = %u",
2300 _old_marking_cycles_started, _old_marking_cycles_completed);
2301
2302 _old_marking_cycles_completed += 1;
2303
2304 // We need to clear the "in_progress" flag in the CM thread before
2305   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2306 // is set) so that if a waiter requests another System.gc() it doesn't
2307 // incorrectly see that a marking cycle is still in progress.
2308 if (concurrent) {
2309 _cmThread->set_idle();
2310 }
2311
2312   // This notify_all() will ensure that a thread that called
2313   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2314   // and is waiting for a full GC to finish will be woken up. It is
2315   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2316 FullGCCount_lock->notify_all();
2317 }
2318
2319 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2320 GCIdMarkAndRestore conc_gc_id_mark;
2321 collector_state()->set_concurrent_cycle_started(true);
2322 _gc_timer_cm->register_gc_start(start_time);
2323
2324 _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2325 trace_heap_before_gc(_gc_tracer_cm);
2326 _cmThread->set_gc_id(GCId::current());
2327 }
2328
2329 void G1CollectedHeap::register_concurrent_cycle_end() {
2330 if (collector_state()->concurrent_cycle_started()) {
2331 GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
2332 if (_cm->has_aborted()) {
2333 _gc_tracer_cm->report_concurrent_mode_failure();
2334
2335 // ConcurrentGCTimer will be ended as well.
2336 _cm->register_concurrent_gc_end_and_stop_timer();
2337 } else {
2338 _gc_timer_cm->register_gc_end();
2339 }
2340
2341 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2342
2343 // Clear state variables to prepare for the next concurrent cycle.
2344 collector_state()->set_concurrent_cycle_started(false);
2345 _heap_summary_sent = false;
2346 }
2347 }
2348
2349 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2350 if (collector_state()->concurrent_cycle_started()) {
2351     // This function can be called when:
2352     //  * the cleanup pause is run,
2353     //  * the concurrent cycle is aborted before the cleanup pause, or
2354     //  * the concurrent cycle is aborted after the cleanup pause,
2355     //    but before the concurrent cycle end has been registered.
2356 // Make sure that we only send the heap information once.
2357 if (!_heap_summary_sent) {
2358 GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
2359 trace_heap_after_gc(_gc_tracer_cm);
2360 _heap_summary_sent = true;
2361 }
2362 }
2363 }
2364
2365 void G1CollectedHeap::collect(GCCause::Cause cause) {
2366 assert_heap_not_locked();
2367
2368 uint gc_count_before;
2369 uint old_marking_count_before;
2370 uint full_gc_count_before;
2371 bool retry_gc;
2372
2373 do {
2374 retry_gc = false;
2375
2376 {
2377 MutexLocker ml(Heap_lock);
2378
2379 // Read the GC count while holding the Heap_lock
2380 gc_count_before = total_collections();
2381 full_gc_count_before = total_full_collections();
2382 old_marking_count_before = _old_marking_cycles_started;
2383 }
2384
2824 tty->cr();
2825 }
2826 };
2827
2828 void G1CollectedHeap::print_cset_rsets() {
2829 PrintRSetsClosure cl("Printing CSet RSets");
2830 collection_set_iterate(&cl);
2831 }
2832
2833 void G1CollectedHeap::print_all_rsets() {
2834   PrintRSetsClosure cl("Printing All RSets");
2835 heap_region_iterate(&cl);
2836 }
2837 #endif // PRODUCT
2838
2839 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2840 YoungList* young_list = heap()->young_list();
2841
2842 size_t eden_used_bytes = young_list->eden_used_bytes();
2843 size_t survivor_used_bytes = young_list->survivor_used_bytes();
2844
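  // young_list_target_length() counts both eden and survivor regions, so
  // subtract the survivor usage to approximate the eden capacity.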
2845 size_t eden_capacity_bytes =
2846 (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2847
2848 VirtualSpaceSummary heap_summary = create_heap_space_summary();
2849 return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions());
2850 }
2851
2852 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2853 return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2854 stats->unused(), stats->used(), stats->region_end_waste(),
2855 stats->regions_filled(), stats->direct_allocated(),
2856 stats->failure_used(), stats->failure_waste());
2857 }
2858
2859 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2860 const G1HeapSummary& heap_summary = create_g1_heap_summary();
2861 gc_tracer->report_gc_heap_summary(when, heap_summary);
2862
2863 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2864 gc_tracer->report_metaspace_summary(when, metaspace_summary);
2865 }
2866
2867
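// Static accessor to the single G1CollectedHeap instance, e.g.:
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();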
2868 G1CollectedHeap* G1CollectedHeap::heap() {
2869 CollectedHeap* heap = Universe::heap();
2870 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2871 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
2872 return (G1CollectedHeap*)heap;
2873 }
2874
2875 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2876 // always_do_update_barrier = false;
2877 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2878   // Fill TLABs and such
2879 accumulate_statistics_all_tlabs();
2880 ensure_parsability(true);
2881
2882 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2883 }
2884
2885 void G1CollectedHeap::gc_epilogue(bool full) {
2886   // We are at the end of the GC; total_collections() has already been incremented.
2887 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
3216 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3217 assert(!collector_state()->during_initial_mark_pause() ||
3218 collector_state()->gcs_are_young(), "sanity");
3219
3220 // We also do not allow mixed GCs during marking.
3221 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3222
3223   // Record whether this pause is an initial mark. By the time the current
3224   // thread has completed its logging output and it is safe to signal
3225   // the CM thread, the flag's value in the policy will have been reset.
3226 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3227
3228   // Inner scope for scope-based logging, timers, and stats collection
3229 {
3230 EvacuationInfo evacuation_info;
3231
3232 if (collector_state()->during_initial_mark_pause()) {
3233       // We are about to start a marking cycle, so we increment the
3234       // old marking cycles counter.
3235 increment_old_marking_cycles_started();
3236 register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3237 }
3238
3239 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3240
3241 GCTraceCPUTime tcpu;
3242
3243 FormatBuffer<> gc_string("Pause ");
3244 if (collector_state()->during_initial_mark_pause()) {
3245 gc_string.append("Initial Mark");
3246 } else if (collector_state()->gcs_are_young()) {
3247 gc_string.append("Young");
3248 } else {
3249 gc_string.append("Mixed");
3250 }
3251 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3252
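    // Recompute the number of active GC workers for this pause, bounded by
    // the gang's total and scaled with the number of non-daemon Java threads.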
3253 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3254 workers()->active_workers(),
3255 Threads::number_of_non_daemon_threads());
3256 workers()->set_active_workers(active_workers);
|
1750 _ref_processor_stw(NULL),
1751 _bot(NULL),
1752 _cg1r(NULL),
1753 _g1mm(NULL),
1754 _refine_cte_cl(NULL),
1755 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1756 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1757 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1758 _humongous_reclaim_candidates(),
1759 _has_humongous_reclaim_candidates(false),
1760 _archive_allocator(NULL),
1761 _free_regions_coming(false),
1762 _young_list(new YoungList(this)),
1763 _gc_time_stamp(0),
1764 _summary_bytes_used(0),
1765 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1766 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1767 _expand_heap_after_alloc_failure(true),
1768 _old_marking_cycles_started(0),
1769 _old_marking_cycles_completed(0),
1770 _in_cset_fast_test(),
1771 _dirty_cards_region_list(NULL),
1772 _worker_cset_start_region(NULL),
1773 _worker_cset_start_region_time_stamp(NULL),
1774 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1775 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1776
1777 _workers = new WorkGang("GC Thread", ParallelGCThreads,
1778 /* are_GC_task_threads */true,
1779 /* are_ConcurrentGC_threads */false);
1780 _workers->initialize_workers();
1781 _verifier = new G1HeapVerifier(this);
1782
1783 _allocator = G1Allocator::create_allocator(this);
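  // Objects that do not fit in half a heap region are humongous and are
  // allocated in dedicated humongous regions rather than in the young gen.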
1784 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1785
1786 // Override the default _filler_array_max_size so that no humongous filler
1787 // objects are created.
1788 _filler_array_max_size = _humongous_object_threshold_in_words;
1789
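  // One reference-to-scan task queue per GC worker thread.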
1790 uint n_queues = ParallelGCThreads;
1791 _task_queues = new RefToScanQueueSet(n_queues);
1792
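  // Per-worker collection set start regions, time stamps, and evacuation
  // failure info, all indexed by worker id.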
1793 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1794 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1795 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
2296 "is inconsistent with _old_marking_cycles_completed = %u",
2297 _old_marking_cycles_started, _old_marking_cycles_completed);
2298
2299 _old_marking_cycles_completed += 1;
2300
2301 // We need to clear the "in_progress" flag in the CM thread before
2302   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2303 // is set) so that if a waiter requests another System.gc() it doesn't
2304 // incorrectly see that a marking cycle is still in progress.
2305 if (concurrent) {
2306 _cmThread->set_idle();
2307 }
2308
2309   // This notify_all() will ensure that a thread that called
2310   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2311   // and is waiting for a full GC to finish will be woken up. It is
2312   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2313 FullGCCount_lock->notify_all();
2314 }
2315
2316 void G1CollectedHeap::collect(GCCause::Cause cause) {
2317 assert_heap_not_locked();
2318
2319 uint gc_count_before;
2320 uint old_marking_count_before;
2321 uint full_gc_count_before;
2322 bool retry_gc;
2323
2324 do {
2325 retry_gc = false;
2326
2327 {
2328 MutexLocker ml(Heap_lock);
2329
2330 // Read the GC count while holding the Heap_lock
2331 gc_count_before = total_collections();
2332 full_gc_count_before = total_full_collections();
2333 old_marking_count_before = _old_marking_cycles_started;
2334 }
2335
2775 tty->cr();
2776 }
2777 };
2778
2779 void G1CollectedHeap::print_cset_rsets() {
2780 PrintRSetsClosure cl("Printing CSet RSets");
2781 collection_set_iterate(&cl);
2782 }
2783
2784 void G1CollectedHeap::print_all_rsets() {
2785   PrintRSetsClosure cl("Printing All RSets");
2786 heap_region_iterate(&cl);
2787 }
2788 #endif // PRODUCT
2789
2790 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2791 YoungList* young_list = heap()->young_list();
2792
2793 size_t eden_used_bytes = young_list->eden_used_bytes();
2794 size_t survivor_used_bytes = young_list->survivor_used_bytes();
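  // used() expects the Heap_lock to be held; fall back to the lock-free
  // used_unlocked() when the caller does not hold it.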
2795 size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2796
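  // young_list_target_length() counts both eden and survivor regions, so
  // subtract the survivor usage to approximate the eden capacity.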
2797 size_t eden_capacity_bytes =
2798 (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2799
2800 VirtualSpaceSummary heap_summary = create_heap_space_summary();
2801 return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2802 eden_capacity_bytes, survivor_used_bytes, num_regions());
2803 }
2804
2805 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2806 return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2807 stats->unused(), stats->used(), stats->region_end_waste(),
2808 stats->regions_filled(), stats->direct_allocated(),
2809 stats->failure_used(), stats->failure_waste());
2810 }
2811
2812 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2813 const G1HeapSummary& heap_summary = create_g1_heap_summary();
2814 gc_tracer->report_gc_heap_summary(when, heap_summary);
2815
2816 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2817 gc_tracer->report_metaspace_summary(when, metaspace_summary);
2818 }
2819
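// Static accessor to the single G1CollectedHeap instance, e.g.:
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();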
2820 G1CollectedHeap* G1CollectedHeap::heap() {
2821 CollectedHeap* heap = Universe::heap();
2822 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2823 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
2824 return (G1CollectedHeap*)heap;
2825 }
2826
2827 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2828 // always_do_update_barrier = false;
2829 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2831   // Fill TLABs and such
2831 accumulate_statistics_all_tlabs();
2832 ensure_parsability(true);
2833
2834 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2835 }
2836
2837 void G1CollectedHeap::gc_epilogue(bool full) {
2838   // We are at the end of the GC; total_collections() has already been incremented.
2839 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
3168 // We do not allow initial-mark to be piggy-backed on a mixed GC.
3169 assert(!collector_state()->during_initial_mark_pause() ||
3170 collector_state()->gcs_are_young(), "sanity");
3171
3172 // We also do not allow mixed GCs during marking.
3173 assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3174
3175   // Record whether this pause is an initial mark. By the time the current
3176   // thread has completed its logging output and it is safe to signal
3177   // the CM thread, the flag's value in the policy will have been reset.
3178 bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3179
3180   // Inner scope for scope-based logging, timers, and stats collection
3181 {
3182 EvacuationInfo evacuation_info;
3183
3184 if (collector_state()->during_initial_mark_pause()) {
3185       // We are about to start a marking cycle, so we increment the
3186       // old marking cycles counter.
3187 increment_old_marking_cycles_started();
3188 _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
3189 }
3190
3191 _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3192
3193 GCTraceCPUTime tcpu;
3194
3195 FormatBuffer<> gc_string("Pause ");
3196 if (collector_state()->during_initial_mark_pause()) {
3197 gc_string.append("Initial Mark");
3198 } else if (collector_state()->gcs_are_young()) {
3199 gc_string.append("Young");
3200 } else {
3201 gc_string.append("Mixed");
3202 }
3203 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3204
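    // Recompute the number of active GC workers for this pause, bounded by
    // the gang's total and scaled with the number of non-daemon Java threads.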
3205 uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3206 workers()->active_workers(),
3207 Threads::number_of_non_daemon_threads());
3208 workers()->set_active_workers(active_workers);
|