  SuspendibleThreadSet::desynchronize();
}

void G1CollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}

void G1CollectedHeap::ref_processing_init() {
  // Reference processing in G1 currently works as follows:
  //
  // * There are two reference processor instances. One is
  //   used to record and process discovered references
  //   during concurrent marking; the other is used to
  //   record and process references during STW pauses
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
  //   * Reference discovery is enabled at concurrent start.
  //   * Reference discovery is disabled and the discovered
  //     references processed etc. during remarking.
  //   * Reference discovery is MT (see below).
  //   * Reference discovery requires a barrier (see below).
  //   * Reference processing may or may not be MT
  //     (depending on the value of ParallelRefProcEnabled
  //     and ParallelGCThreads).
  //   * A full GC disables reference discovery by the CM
  //     ref processor and abandons any entries on its
  //     discovered lists.
  //
  // * For the STW processor:
  //   * Non MT discovery is enabled at the start of a full GC.
  //   * Processing and enqueueing during a full GC is non-MT.
  //   * During a full GC, references are processed after marking.
  //
  //   * Discovery (may or may not be MT) is enabled at the start
  //     of an incremental evacuation pause.
  //   * References are processed near the end of an STW evacuation pause.
  //   * For both types of GC:

// ...

                                stats->regions_filled(), stats->direct_allocated(),
                                stats->failure_used(), stats->failure_waste());
}

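// Report a snapshot of heap and metaspace occupancy to the given tracer;
// "when" records whether the snapshot was taken before or after the collection.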
void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const G1HeapSummary& heap_summary = create_g1_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void G1CollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // This summary needs to be printed before incrementing total collections.
  rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());

  // Update common counters.
  increment_total_collections(full /* full gc */);
  if (full || collector_state()->in_concurrent_start_gc()) {
    increment_old_marking_cycles_started();
  }

  // Fill TLABs and such
  {
    Ticks start = Ticks::now();
    ensure_parsability(true);
    Tickspan dt = Ticks::now() - start;
    phase_times()->record_prepare_tlab_time_ms(dt.seconds() * MILLIUNITS);
  }

  if (!full) {
    // Flush dirty card queues to qset, so later phases don't need to account
    // for partially filled per-thread queues and such. Not needed for full
    // collections, which ignore those logs.
    Ticks start = Ticks::now();
    G1BarrierSet::dirty_card_queue_set().concatenate_logs();
    Tickspan dt = Ticks::now() - start;
    phase_times()->record_concatenate_dirty_card_logs_time_ms(dt.seconds() * MILLIUNITS);
  }

// ...

  phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
}

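// Choose the regions for the upcoming pause: finalize the initial collection
// set against the pause time goal and record the number of selected regions
// (initial plus optional) in the evacuation info for the GC tracer.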
void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
  _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
  evacuation_info.set_collectionset_regions(collection_set()->region_length() +
                                            collection_set()->optional_region_length());

  _cm->verify_no_collection_set_oops();

  if (_hr_printer.is_active()) {
    G1PrintCollectionSetClosure cl(&_hr_printer);
    _collection_set.iterate(&cl);
    _collection_set.iterate_optional(&cl);
  }
}

G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const {
  if (collector_state()->in_concurrent_start_gc()) {
    return G1HeapVerifier::G1VerifyConcurrentStart;
  } else if (collector_state()->in_young_only_phase()) {
    return G1HeapVerifier::G1VerifyYoungNormal;
  } else {
    return G1HeapVerifier::G1VerifyMixed;
  }
}

void G1CollectedHeap::verify_before_young_collection(G1HeapVerifier::G1VerifyType type) {
  if (VerifyRememberedSets) {
    log_info(gc, verify)("[Verifying RemSets before GC]");
    VerifyRegionRemSetClosure v_cl;
    heap_region_iterate(&v_cl);
  }
  _verifier->verify_before_gc(type);
  _verifier->check_bitmaps("GC Start");
  verify_numa_regions("GC Start");
}

void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
  // ...
  }
  _verifier->verify_after_gc(type);
  _verifier->check_bitmaps("GC End");
  verify_numa_regions("GC End");
}

void G1CollectedHeap::expand_heap_after_young_collection() {
  size_t expand_bytes = _heap_sizing_policy->young_collection_expansion_amount();
  if (expand_bytes > 0) {
    // No need for ergo logging here;
    // expansion_amount() does this when it returns a value > 0.
    double expand_ms;
    if (!expand(expand_bytes, _workers, &expand_ms)) {
      // We failed to expand the heap. Cannot do anything about it.
    }
    phase_times()->record_expand_heap_time(expand_ms);
  }
}

const char* G1CollectedHeap::young_gc_name() const {
  if (collector_state()->in_concurrent_start_gc()) {
    return "Pause Young (Concurrent Start)";
  } else if (collector_state()->in_young_only_phase()) {
    if (collector_state()->in_young_gc_before_mixed()) {
      return "Pause Young (Prepare Mixed)";
    } else {
      return "Pause Young (Normal)";
    }
  } else {
    return "Pause Young (Mixed)";
  }
}

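// Attempt an evacuation pause at the current safepoint. Returns false if the
// pause had to be skipped because the GCLocker is active; in that case
// check_active_before_gc() arranges for a GC once the locker is released.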
bool G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  assert_at_safepoint_on_vm_thread();
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

// ...

void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
  GCIdMark gc_id_mark;

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  ResourceMark rm;

  policy()->note_gc_start();

  _gc_timer_stw->register_gc_start();
  _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());

  wait_for_root_region_scanning();

  print_heap_before_gc();
  print_heap_regions();
  trace_heap_before_gc(_gc_tracer_stw);

  _verifier->verify_region_sets_optional();
  _verifier->verify_dirty_young_regions();

  // We should not be doing concurrent start unless the concurrent mark thread is running
  if (!_cm_thread->should_terminate()) {
    // This call will decide whether this pause is a concurrent start
    // pause. If it is, in_concurrent_start_gc() will return true
    // for the duration of this pause.
    policy()->decide_on_conc_mark_initiation();
  }

  // We do not allow concurrent start to be piggy-backed on a mixed GC.
  assert(!collector_state()->in_concurrent_start_gc() ||
         collector_state()->in_young_only_phase(), "sanity");
  // We also do not allow mixed GCs during marking.
  assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");

  // Record whether this pause is a concurrent start. When the current
  // thread has completed its logging output and it's safe to signal
  // the CM thread, the flag's value in the policy has been reset.
  bool should_start_conc_mark = collector_state()->in_concurrent_start_gc();
  if (should_start_conc_mark) {
    _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
  }

  // Inner scope for scope based logging, timers, and stats collection
  {
    G1EvacuationInfo evacuation_info;

    _gc_tracer_stw->report_yc_type(collector_state()->yc_type());

    GCTraceCPUTime tcpu;

    GCTraceTime(Info, gc) tm(young_gc_name(), NULL, gc_cause(), true);

    uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
                                                            workers()->active_workers(),
                                                            Threads::number_of_non_daemon_threads());
    active_workers = workers()->update_active_workers(active_workers);
    log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());

// ...

                                              collection_set()->optional_region_length());
    pre_evacuate_collection_set(evacuation_info, &per_thread_states);

    // Actually do the work...
    evacuate_initial_collection_set(&per_thread_states);

    if (_collection_set.optional_region_length() != 0) {
      evacuate_optional_collection_set(&per_thread_states);
    }
    post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);

    start_new_collection_set();

    _survivor_evac_stats.adjust_desired_plab_sz();
    _old_evac_stats.adjust_desired_plab_sz();

    if (should_start_conc_mark) {
      // We have to do this before we notify the CM threads that
      // they can start working to make sure that all the
      // appropriate initialization is done on the CM object.
      concurrent_mark()->post_concurrent_start();
      // Note that we don't actually trigger the CM thread at
      // this point. We do that later when we're sure that
      // the current thread has completed its logging output.
    }

    allocate_dummy_regions();

    _allocator->init_mutator_alloc_regions();

    expand_heap_after_young_collection();

    double sample_end_time_sec = os::elapsedTime();
    double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
    policy()->record_collection_pause_end(pause_time_ms);
  }

  verify_after_young_collection(verify_type);

  gc_epilogue(false);
}

// ...

                                              &drain_queue,
                                              &par_task_executor,
                                              pt);
  }

  _gc_tracer_stw->report_gc_reference_stats(stats);

  // We have completed copying any necessary live referent objects.
  assert(pss->queue_is_empty(), "both queue and overflow should be empty");

  make_pending_list_reachable();

  assert(!rp->discovery_enabled(), "Postcondition");
  rp->verify_no_references_recorded();

  double ref_proc_time = os::elapsedTime() - ref_proc_start;
  phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}

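// During a concurrent start pause, mark the head of the reference pending
// list in the next bitmap so that the upcoming marking cycle keeps the
// pending references reachable.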
void G1CollectedHeap::make_pending_list_reachable() {
  if (collector_state()->in_concurrent_start_gc()) {
    oop pll_head = Universe::reference_pending_list();
    if (pll_head != NULL) {
      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
      _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
    }
  }
}

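// Flush each worker's scan state into the shared statistics; the elapsed
// time is recorded as the MergePSS phase against worker 0.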
void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
  Ticks start = Ticks::now();
  per_thread_states->flush();
  phase_times()->record_or_add_time_secs(G1GCPhaseTimes::MergePSS, 0 /* worker_id */, (Ticks::now() - start).seconds());
}

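// Gang task that prepares regions for evacuation; each worker tallies the
// humongous regions it visits and the subset that are candidates for eager
// reclaim, totals that are logged via record_register_regions() below.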
class G1PrepareEvacuationTask : public AbstractGangTask {
  class G1PrepareRegionsClosure : public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    G1PrepareEvacuationTask* _parent_task;
    size_t _worker_humongous_total;
    size_t _worker_humongous_candidates;

// ...

    rem_set()->prepare_for_scan_heap_roots();
    phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  {
    G1PrepareEvacuationTask g1_prep_task(this);
    Tickspan task_time = run_task(&g1_prep_task);

    phase_times()->record_register_regions(task_time.seconds() * 1000.0,
                                           g1_prep_task.humongous_total(),
                                           g1_prep_task.humongous_candidates());
  }

  assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
  _preserved_marks_set.assert_empty();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  // Concurrent start needs claim bits to keep track of the marked-through CLDs.
  if (collector_state()->in_concurrent_start_gc()) {
    concurrent_mark()->pre_concurrent_start();

    double start_clear_claimed_marks = os::elapsedTime();

    ClassLoaderDataGraph::clear_claimed_marks();

    double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
    phase_times()->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
  }

  // Should G1EvacuationFailureALot be in effect for this GC?
  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
}

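// Common state for the gang tasks that evacuate the initial and optional
// collection sets: per-worker scan states plus the shared task queues and
// terminator used for work stealing and termination.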
class G1EvacuateRegionsBaseTask : public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadStateSet* _per_thread_states;
  G1ScannerTasksQueueSet* _task_queues;
  TaskTerminator _terminator;
  uint _num_workers;

// ...

    }
    _policy->remset_tracker()->update_at_allocate(new_alloc_region);
    register_region_with_region_attr(new_alloc_region);
    _hr_printer.alloc(new_alloc_region);
    return new_alloc_region;
  }
  return NULL;
}

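// Retire a GC allocation region, accounting its used bytes to the old or
// survivor set. During a concurrent start pause the newly allocated range
// [NTAMS, top) is also registered as a root region for concurrent marking.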
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                             size_t allocated_bytes,
                                             G1HeapRegionAttr dest) {
  _bytes_used_during_gc += allocated_bytes;
  if (dest.is_old()) {
    old_set_add(alloc_region);
  } else {
    assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
    _survivor.add_used_bytes(allocated_bytes);
  }

  bool const during_concurrent_start = collector_state()->in_concurrent_start_gc();
  if (during_concurrent_start && allocated_bytes > 0) {
    _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
  }
  _hr_printer.retire(alloc_region);
}

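// Find the free region at the highest heap address and allocate it,
// expanding the heap first if that address range is not yet committed.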
HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
  bool expanded = false;
  uint index = _hrm->find_highest_free(&expanded);

  if (index != G1_NO_HRM_INDEX) {
    if (expanded) {
      log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
                                HeapRegion::GrainWords * HeapWordSize);
    }
    return _hrm->allocate_free_regions_starting_at(index, 1);
  }
  return NULL;
}