910 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
911
912 _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
913 gch->trace_heap_before_gc(gc_tracer());
914
915 init_assuming_no_promotion_failure();
916
917 if (UseAdaptiveSizePolicy) {
918 set_survivor_overflow(false);
919 size_policy->minor_collection_begin();
920 }
921
922 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
923 // Capture heap used before collection (for printing).
924 size_t gch_prev_used = gch->used();
925
926 age_table()->clear();
927 to()->clear(SpaceDecorator::Mangle);
928
929 gch->save_marks();
930 assert(workers != NULL, "Need parallel worker threads.");
931 uint n_workers = active_workers;
932
933 // Set the correct parallelism (number of queues) in the reference processor
934 ref_processor()->set_active_mt_degree(n_workers);
935
936 // Always set the terminator for the active number of workers
937 // because only those workers go through the termination protocol.
938 ParallelTaskTerminator _term(n_workers, task_queues());
939 ParScanThreadStateSet thread_state_set(workers->active_workers(),
940 *to(), *this, *_old_gen, *task_queues(),
941 _overflow_stacks, desired_plab_sz(), _term);
942
943 thread_state_set.reset(n_workers, promotion_failed());
944
945 {
946 StrongRootsScope srs(n_workers);
947
948 ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
949 gch->rem_set()->prepare_for_younger_refs_iterate(true);
950 // It turns out that even when we're using 1 thread, doing the work in a
951 // separate thread causes wide variance in run times. We can't help this
952 // in the multi-threaded case, but we special-case n=1 here to get
953 // repeatable measurements of the 1-thread overhead of the parallel code.
954 if (n_workers > 1) {
955 workers->run_task(&tsk);
956 } else {
957 tsk.work(0);
958 }
959 }
960
961 thread_state_set.reset(0 /* Bad value in debug if not reset */,
962 promotion_failed());
963
964 // Trace and reset failed promotion info.
965 if (promotion_failed()) {
966 thread_state_set.trace_promotion_failed(gc_tracer());
967 }
968
969 // Process (weak) reference objects found during scavenge.
970 ReferenceProcessor* rp = ref_processor();
971 IsAliveClosure is_alive(this);
972 ScanWeakRefClosure scan_weak_ref(this);
973 KeepAliveClosure keep_alive(&scan_weak_ref);
974 ScanClosure scan_without_gc_barrier(this, false);
1007 // other spaces.
1008 to()->mangle_unused_area();
1009 }
1010 swap_spaces();
1011
1012 // A successful scavenge should restart the GC time limit count, which is
1013 // used for full GCs.
1014 size_policy->reset_gc_overhead_limit_count();
1015
1016 assert(to()->is_empty(), "to space should be empty now");
1017
1018 adjust_desired_tenuring_threshold();
1019 } else {
1020 handle_promotion_failed(gch, thread_state_set);
1021 }
1022 // Set the new iteration-safe limit for the survivor spaces.
1023 from()->set_concurrent_iteration_safe_limit(from()->top());
1024 to()->set_concurrent_iteration_safe_limit(to()->top());
1025
1026 if (ResizePLAB) {
1027 plab_stats()->adjust_desired_plab_sz(n_workers);
1028 }
1029
1030 if (PrintGC && !PrintGCDetails) {
1031 gch->print_heap_change(gch_prev_used);
1032 }
1033
1034 TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
1035 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
1036
1037 if (UseAdaptiveSizePolicy) {
1038 size_policy->minor_collection_end(gch->gc_cause());
1039 size_policy->avg_survived()->sample(from()->used());
1040 }
1041
1042 // We need a monotonically non-decreasing time in ms. os::javaTimeMillis()
1043 // does not guarantee monotonicity, so derive the timestamp from
1044 // os::javaTimeNanos() to avoid time-warp warnings.
1045 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1046 update_time_of_last_gc(now);
1047
910 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
911
912 _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
913 gch->trace_heap_before_gc(gc_tracer());
914
915 init_assuming_no_promotion_failure();
916
917 if (UseAdaptiveSizePolicy) {
918 set_survivor_overflow(false);
919 size_policy->minor_collection_begin();
920 }
921
922 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
923 // Capture heap used before collection (for printing).
924 size_t gch_prev_used = gch->used();
925
926 age_table()->clear();
927 to()->clear(SpaceDecorator::Mangle);
928
929 gch->save_marks();
930
931 // Set the correct parallelism (number of queues) in the reference processor
932 ref_processor()->set_active_mt_degree(active_workers);
933
934 // Always set the terminator for the active number of workers
935 // because only those workers go through the termination protocol.
936 ParallelTaskTerminator _term(active_workers, task_queues());
937 ParScanThreadStateSet thread_state_set(active_workers,
938 *to(), *this, *_old_gen, *task_queues(),
939 _overflow_stacks, desired_plab_sz(), _term);
940
941 thread_state_set.reset(active_workers, promotion_failed());
942
943 {
944 StrongRootsScope srs(active_workers);
945
946 ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
947 gch->rem_set()->prepare_for_younger_refs_iterate(true);
948 // It turns out that even when we're using 1 thread, doing the work in a
949 // separate thread causes wide variance in run times. We can't help this
950 // in the multi-threaded case, but we special-case n=1 here to get
951 // repeatable measurements of the 1-thread overhead of the parallel code.
952 if (active_workers > 1) {
953 workers->run_task(&tsk);
954 } else {
955 tsk.work(0);
956 }
957 }
958
959 thread_state_set.reset(0 /* Bad value in debug if not reset */,
960 promotion_failed());
961
962 // Trace and reset failed promotion info.
963 if (promotion_failed()) {
964 thread_state_set.trace_promotion_failed(gc_tracer());
965 }
966
967 // Process (weak) reference objects found during scavenge.
968 ReferenceProcessor* rp = ref_processor();
969 IsAliveClosure is_alive(this);
970 ScanWeakRefClosure scan_weak_ref(this);
971 KeepAliveClosure keep_alive(&scan_weak_ref);
972 ScanClosure scan_without_gc_barrier(this, false);
1005 // other spaces.
1006 to()->mangle_unused_area();
1007 }
1008 swap_spaces();
1009
1010 // A successful scavenge should restart the GC time limit count, which is
1011 // used for full GCs.
1012 size_policy->reset_gc_overhead_limit_count();
1013
1014 assert(to()->is_empty(), "to space should be empty now");
1015
1016 adjust_desired_tenuring_threshold();
1017 } else {
1018 handle_promotion_failed(gch, thread_state_set);
1019 }
1020 // Set the new iteration-safe limit for the survivor spaces.
1021 from()->set_concurrent_iteration_safe_limit(from()->top());
1022 to()->set_concurrent_iteration_safe_limit(to()->top());
1023
1024 if (ResizePLAB) {
1025 plab_stats()->adjust_desired_plab_sz(active_workers);
1026 }
1027
1028 if (PrintGC && !PrintGCDetails) {
1029 gch->print_heap_change(gch_prev_used);
1030 }
1031
1032 TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
1033 TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
1034
1035 if (UseAdaptiveSizePolicy) {
1036 size_policy->minor_collection_end(gch->gc_cause());
1037 size_policy->avg_survived()->sample(from()->used());
1038 }
1039
1040 // We need a monotonically non-decreasing time in ms. os::javaTimeMillis()
1041 // does not guarantee monotonicity, so derive the timestamp from
1042 // os::javaTimeNanos() to avoid time-warp warnings.
1043 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1044 update_time_of_last_gc(now);
1045