< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 57895 : [mq]: 8215297-remove-ptt
rev 57897 : imported patch 8238220-rename-owsttaskterminator
rev 57898 : [mq]: 8238229-remove-tracespinning-code


1115   // marking is no longer active. Therefore we need not
1116   // re-enable reference discovery for the CM ref processor.
1117   // That will be done at the start of the next marking cycle.
1118   // We also know that the STW processor should no longer
1119   // discover any new references.
1120   assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
1121   assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
1122   _ref_processor_stw->verify_no_references_recorded();
1123   _ref_processor_cm->verify_no_references_recorded();
1124 }
1125 
// Emit all post-full-GC logging for G1: the per-region state after compaction,
// the before/after heap transition captured by the caller, the generic
// post-GC heap summary, and (debug builds with TRACESPINNING defined) the
// work-stealing task-termination counters.
// heap_transition: snapshot object owned by the caller; only printed here.
1126   void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
1127   // Post collection logging.
1128   // We should do this after we potentially resize the heap so
1129   // that all the COMMIT / UNCOMMIT events are generated before
1130   // the compaction events.
1131   print_hrm_post_compaction();
1132   heap_transition->print();
1133   print_heap_after_gc();
1134   print_heap_regions();
// Compile-time-gated diagnostics: dump spin/termination statistics gathered
// by the task terminator. (This #ifdef block is deleted by a later revision
// of this file — see the second copy of this function in the diff.)
1135 #ifdef TRACESPINNING
1136   TaskTerminator::print_termination_counts();
1137 #endif
1138 }
1139 
1140 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1141                                          bool clear_all_soft_refs) {
1142   assert_at_safepoint_on_vm_thread();
1143 
1144   if (GCLocker::check_active_before_gc()) {
1145     // Full GC was not completed.
1146     return false;
1147   }
1148 
1149   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1150       soft_ref_policy()->should_clear_all_soft_refs();
1151 
1152   G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1153   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1154 
1155   collector.prepare_collection();
1156   collector.collect();
1157   collector.complete_collection();


3122           // they can start working to make sure that all the
3123           // appropriate initialization is done on the CM object.
3124           concurrent_mark()->post_initial_mark();
3125           // Note that we don't actually trigger the CM thread at
3126           // this point. We do that later when we're sure that
3127           // the current thread has completed its logging output.
3128         }
3129 
3130         allocate_dummy_regions();
3131 
3132         _allocator->init_mutator_alloc_regions();
3133 
3134         expand_heap_after_young_collection();
3135 
3136         double sample_end_time_sec = os::elapsedTime();
3137         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3138         policy()->record_collection_pause_end(pause_time_ms);
3139       }
3140 
3141       verify_after_young_collection(verify_type);
3142 
3143 #ifdef TRACESPINNING
3144       TaskTerminator::print_termination_counts();
3145 #endif
3146 
3147       gc_epilogue(false);
3148     }
3149 
3150     // Print the remainder of the GC log output.
3151     if (evacuation_failed()) {
3152       log_info(gc)("To-space exhausted");
3153     }
3154 
3155     policy()->print_phases();
3156     heap_transition.print();
3157 
3158     _hrm->verify_optional();
3159     _verifier->verify_region_sets_optional();
3160 
3161     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3162     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3163 
3164     print_heap_after_gc();
3165     print_heap_regions();




1115   // marking is no longer active. Therefore we need not
1116   // re-enable reference discovery for the CM ref processor.
1117   // That will be done at the start of the next marking cycle.
1118   // We also know that the STW processor should no longer
1119   // discover any new references.
1120   assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
1121   assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
1122   _ref_processor_stw->verify_no_references_recorded();
1123   _ref_processor_cm->verify_no_references_recorded();
1124 }
1125 
// Emit all post-full-GC logging for G1: the per-region state after
// compaction, the before/after heap transition captured by the caller, and
// the generic post-GC heap summary. (The TRACESPINNING termination-count
// dump that previously ended this function has been removed in this
// revision.)
// heap_transition: snapshot object owned by the caller; only printed here.
1126   void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
1127   // Post collection logging.
1128   // We should do this after we potentially resize the heap so
1129   // that all the COMMIT / UNCOMMIT events are generated before
1130   // the compaction events.
1131   print_hrm_post_compaction();
1132   heap_transition->print();
1133   print_heap_after_gc();
1134   print_heap_regions();



1135 }
1136 
1137 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1138                                          bool clear_all_soft_refs) {
1139   assert_at_safepoint_on_vm_thread();
1140 
1141   if (GCLocker::check_active_before_gc()) {
1142     // Full GC was not completed.
1143     return false;
1144   }
1145 
1146   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1147       soft_ref_policy()->should_clear_all_soft_refs();
1148 
1149   G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
1150   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1151 
1152   collector.prepare_collection();
1153   collector.collect();
1154   collector.complete_collection();


3119           // they can start working to make sure that all the
3120           // appropriate initialization is done on the CM object.
3121           concurrent_mark()->post_initial_mark();
3122           // Note that we don't actually trigger the CM thread at
3123           // this point. We do that later when we're sure that
3124           // the current thread has completed its logging output.
3125         }
3126 
3127         allocate_dummy_regions();
3128 
3129         _allocator->init_mutator_alloc_regions();
3130 
3131         expand_heap_after_young_collection();
3132 
3133         double sample_end_time_sec = os::elapsedTime();
3134         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3135         policy()->record_collection_pause_end(pause_time_ms);
3136       }
3137 
3138       verify_after_young_collection(verify_type);




3139 
3140       gc_epilogue(false);
3141     }
3142 
3143     // Print the remainder of the GC log output.
3144     if (evacuation_failed()) {
3145       log_info(gc)("To-space exhausted");
3146     }
3147 
3148     policy()->print_phases();
3149     heap_transition.print();
3150 
3151     _hrm->verify_optional();
3152     _verifier->verify_region_sets_optional();
3153 
3154     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3155     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3156 
3157     print_heap_after_gc();
3158     print_heap_regions();


< prev index next >