2310
2311 // This notify_all() will ensure that a thread that called
2312 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2313 // and is waiting for a full GC to finish will be woken up. It is
2314 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2315 FullGCCount_lock->notify_all();
2316 }
2317
// Records the start of a concurrent (marking) cycle: starts the concurrent
// GC timer and tracer, traces the pre-GC heap state, and stamps the
// concurrent-mark thread with the cycle's GC id.
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
  // Establish a fresh GC id for everything reported in this scope;
  // the previous id is restored when the mark goes out of scope.
  GCIdMarkAndRestore conc_gc_id_mark;
  collector_state()->set_concurrent_cycle_started(true);
  // Start the concurrent-mark timer at the caller-supplied timestamp.
  _gc_timer_cm->register_gc_start(start_time);

  _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
  trace_heap_before_gc(_gc_tracer_cm);
  // Remember the current GC id on the concurrent-mark thread so later
  // cycle events (end registration, heap tracing) can reuse it.
  _cmThread->set_gc_id(GCId::current());
}
2327
2328 void G1CollectedHeap::register_concurrent_cycle_end() {
2329 if (collector_state()->concurrent_cycle_started()) {
2330 GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
2331 if (_cm->has_aborted()) {
2332 _gc_tracer_cm->report_concurrent_mode_failure();
2333 }
2334
2335 _gc_timer_cm->register_gc_end();
2336 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2337
2338 // Clear state variables to prepare for the next concurrent cycle.
2339 collector_state()->set_concurrent_cycle_started(false);
2340 _heap_summary_sent = false;
2341 }
2342 }
2343
2344 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2345 if (collector_state()->concurrent_cycle_started()) {
2346 // This function can be called when:
2347 // the cleanup pause is run
2348 // the concurrent cycle is aborted before the cleanup pause.
2349 // the concurrent cycle is aborted after the cleanup pause,
2350 // but before the concurrent cycle end has been registered.
2351 // Make sure that we only send the heap information once.
2352 if (!_heap_summary_sent) {
2353 GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
2354 trace_heap_after_gc(_gc_tracer_cm);
2355 _heap_summary_sent = true;
|
2310
2311 // This notify_all() will ensure that a thread that called
2312 // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2313 // and is waiting for a full GC to finish will be woken up. It is
2314 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2315 FullGCCount_lock->notify_all();
2316 }
2317
// Records the start of a concurrent (marking) cycle: starts the concurrent
// GC timer and tracer, traces the pre-GC heap state, and stamps the
// concurrent-mark thread with the cycle's GC id.
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
  // Establish a fresh GC id for everything reported in this scope;
  // the previous id is restored when the mark goes out of scope.
  GCIdMarkAndRestore conc_gc_id_mark;
  collector_state()->set_concurrent_cycle_started(true);
  // Start the concurrent-mark timer at the caller-supplied timestamp.
  _gc_timer_cm->register_gc_start(start_time);

  _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
  trace_heap_before_gc(_gc_tracer_cm);
  // Remember the current GC id on the concurrent-mark thread so later
  // cycle events (end registration, heap tracing) can reuse it.
  _cmThread->set_gc_id(GCId::current());
}
2327
2328 void G1CollectedHeap::register_concurrent_cycle_end() {
2329 if (collector_state()->concurrent_cycle_started()) {
2330 Ticks end_tick = Ticks::now();
2331 GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
2332 if (_cm->has_aborted()) {
2333 _gc_tracer_cm->report_concurrent_mode_failure();
2334
2335 if (_cm->concurrent_marking_from_roots()) {
2336 _gc_timer_cm->register_gc_concurrent_end(end_tick);
2337 }
2338 }
2339
2340 _gc_timer_cm->register_gc_end(end_tick);
2341 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2342
2343 // Clear state variables to prepare for the next concurrent cycle.
2344 collector_state()->set_concurrent_cycle_started(false);
2345 _heap_summary_sent = false;
2346 }
2347 }
2348
2349 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2350 if (collector_state()->concurrent_cycle_started()) {
2351 // This function can be called when:
2352 // the cleanup pause is run
2353 // the concurrent cycle is aborted before the cleanup pause.
2354 // the concurrent cycle is aborted after the cleanup pause,
2355 // but before the concurrent cycle end has been registered.
2356 // Make sure that we only send the heap information once.
2357 if (!_heap_summary_sent) {
2358 GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
2359 trace_heap_after_gc(_gc_tracer_cm);
2360 _heap_summary_sent = true;
|