21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "classfile/symbolTable.hpp"
29 #include "code/codeCache.hpp"
30 #include "code/icBuffer.hpp"
31 #include "gc/g1/bufferingOopClosure.hpp"
32 #include "gc/g1/concurrentG1Refine.hpp"
33 #include "gc/g1/concurrentG1RefineThread.hpp"
34 #include "gc/g1/concurrentMarkThread.inline.hpp"
35 #include "gc/g1/g1Allocator.inline.hpp"
36 #include "gc/g1/g1CollectedHeap.inline.hpp"
37 #include "gc/g1/g1CollectorPolicy.hpp"
38 #include "gc/g1/g1CollectorState.hpp"
39 #include "gc/g1/g1EvacStats.inline.hpp"
40 #include "gc/g1/g1GCPhaseTimes.hpp"
41 #include "gc/g1/g1HeapVerifier.hpp"
42 #include "gc/g1/g1MarkSweep.hpp"
43 #include "gc/g1/g1OopClosures.inline.hpp"
44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
46 #include "gc/g1/g1RemSet.inline.hpp"
47 #include "gc/g1/g1RootClosures.hpp"
48 #include "gc/g1/g1RootProcessor.hpp"
49 #include "gc/g1/g1StringDedup.hpp"
50 #include "gc/g1/g1YCTypes.hpp"
51 #include "gc/g1/heapRegion.inline.hpp"
52 #include "gc/g1/heapRegionRemSet.hpp"
53 #include "gc/g1/heapRegionSet.inline.hpp"
54 #include "gc/g1/suspendibleThreadSet.hpp"
55 #include "gc/g1/vm_operations_g1.hpp"
56 #include "gc/shared/gcHeapSummary.hpp"
57 #include "gc/shared/gcId.hpp"
58 #include "gc/shared/gcLocker.inline.hpp"
59 #include "gc/shared/gcTimer.hpp"
60 #include "gc/shared/gcTrace.hpp"
1233
1234 _verifier->verify_region_sets_optional();
1235
1236 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1237 collector_policy()->should_clear_all_soft_refs();
1238
1239 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
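// ClearedAllSoftRefs is a stack-allocated helper: if the flag it was
// constructed with is set, its destructor notifies the collector policy
// that every soft reference was cleared by this collection.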
1240
1241 {
1242 IsGCActiveMark x;
1243
1244 // Timing
1245 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
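// A user-requested cause (e.g. System.gc()) must always arrive here
// flagged as an explicit GC.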
1246 GCTraceCPUTime tcpu;
1247
1248 {
1249 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1250 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1251 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1252
1253 g1_policy()->record_full_collection_start();
1254
1255 // Note: When we have a more flexible GC logging framework that
1256 // allows us to add optional attributes to a GC log record, we
1257 // could consider timing and reporting how long we wait in the
1258 // following two methods.
1259 wait_while_free_regions_coming();
1260 // If we start the compaction before the CM threads finish
1261 // scanning the root regions we might trip them over as we'll
1262 // be moving objects / updating references. So let's wait until
1263 // they are done. By telling them to abort, they should complete
1264 // early.
1265 _cm->root_regions()->abort();
1266 _cm->root_regions()->wait_until_scan_finished();
1267 append_secondary_free_list_if_not_empty_with_lock();
1268
1269 gc_prologue(true);
1270 increment_total_collections(true /* full gc */);
1271 increment_old_marking_cycles_started();
1272
1425 }
1426 _verifier->check_bitmaps("Full GC End");
1427
1428 // Start a new incremental collection set for the next pause
1429 assert(g1_policy()->collection_set() == NULL, "must be");
1430 g1_policy()->start_incremental_cset_building();
1431
1432 clear_cset_fast_test();
1433
1434 _allocator->init_mutator_alloc_region();
1435
1436 g1_policy()->record_full_collection_end();
1437
1438 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1439 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1440 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1441 // before any GC notifications are raised.
1442 g1mm()->update_sizes();
1443
1444 gc_epilogue(true);
1445 }
1446
1447 g1_policy()->print_detailed_heap_transition();
1448
1449 print_heap_after_gc();
1450 trace_heap_after_gc(gc_tracer);
1451
1452 post_full_gc_dump(gc_timer);
1453
1454 gc_timer->register_gc_end();
1455 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1456 }
1457
1458 return true;
1459 }
1460
1461 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1462 // Currently, there is no facility in the do_full_collection(bool) API to notify
1463 // the caller that the collection did not succeed (e.g., because it was locked
1464 // out by the GC locker). So, right now, we'll ignore the return value.
1465 bool dummy = do_full_collection(true, /* explicit_gc */
1466 clear_all_soft_refs);
1467 }
1468
1469 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1470 // Include bytes that will be pre-allocated to support collections, as "used".
1471 const size_t used_after_gc = used();
1472 const size_t capacity_after_gc = capacity();
3152
3153 TaskQueueStats totals;
3154 const uint n = num_task_queues();
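// One line per task queue, accumulating into a grand total that is
// printed (and, in debug builds, verified) after the loop.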
3155 for (uint i = 0; i < n; ++i) {
3156 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3157 totals += task_queue(i)->stats;
3158 }
3159 st->print_raw("tot "); totals.print(st); st->cr();
3160
3161 DEBUG_ONLY(totals.verify());
3162 }
3163
3164 void G1CollectedHeap::reset_taskqueue_stats() {
3165 const uint n = num_task_queues();
3166 for (uint i = 0; i < n; ++i) {
3167 task_queue(i)->stats.reset();
3168 }
3169 }
3170 #endif // TASKQUEUE_STATS
3171
3172 void G1CollectedHeap::log_gc_footer() {
3173 if (evacuation_failed()) {
3174 log_info(gc)("To-space exhausted");
3175 }
3176
3177 g1_policy()->print_phases();
3178
3179 g1_policy()->print_detailed_heap_transition();
3180 }
3181
3182
3183 void G1CollectedHeap::wait_for_root_region_scanning() {
3184 double scan_wait_start = os::elapsedTime();
3185 // We have to wait until the CM threads finish scanning the
3186 // root regions as it's the only way to ensure that all the
3187 // objects on them have been correctly scanned before we start
3188 // moving them during the GC.
3189 bool waited = _cm->root_regions()->wait_until_scan_finished();
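// wait_until_scan_finished() reports whether we actually had to block;
// only in that case do we charge the elapsed time to this phase.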
3190 double wait_time_ms = 0.0;
3191 if (waited) {
3192 double scan_wait_end = os::elapsedTime();
3193 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3194 }
3195 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3196 }
3197
3198 bool
3199 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3200 assert_at_safepoint(true /* should_be_vm_thread */);
3201 guarantee(!is_gc_active(), "collection is not reentrant");
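// Note: unlike assert(), guarantee() is also checked in product builds.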
3202
3264 } else {
3265 gc_string.append("Mixed");
3266 }
3267 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3268
3269 g1_policy()->note_gc_start(active_workers);
3270
3271 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3272 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3273
3274 // If the secondary_free_list is not empty, append it to the
3275 // free_list. No need to wait for the cleanup operation to finish;
3276 // the region allocation code will check the secondary_free_list
3277 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3278 // set, skip this step so that the region allocation code has to
3279 // get entries from the secondary_free_list.
3280 if (!G1StressConcRegionFreeing) {
3281 append_secondary_free_list_if_not_empty_with_lock();
3282 }
3283
3284 assert(check_young_list_well_formed(), "young list should be well formed");
3285
3286 // Don't dynamically change the number of GC threads this early. A value of
3287 // 0 is used to indicate serial work. When parallel work is done,
3288 // it will be set.
3289
3290 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3291 IsGCActiveMark x;
3292
3293 gc_prologue(false);
3294 increment_total_collections(false /* full gc */);
3295 increment_gc_time_stamp();
3296
3297 if (VerifyRememberedSets) {
3298 log_info(gc, verify)("[Verifying RemSets before GC]");
3299 VerifyRegionRemSetClosure v_cl;
3300 heap_region_iterate(&v_cl);
3301 }
3302
3303 _verifier->verify_before_gc();
3457 // expansion_amount() does this when it returns a value > 0.
3458 double expand_ms;
3459 if (!expand(expand_bytes, &expand_ms)) {
3460 // We failed to expand the heap. Cannot do anything about it.
3461 }
3462 g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3463 }
3464 }
3465
3466 // We redo the verification, but now with respect to the new CSet, which
3467 // has just been initialized after the previous CSet was freed.
3468 _cm->verify_no_cset_oops();
3469 _cm->note_end_of_gc();
3470
3471 // This timing is only used by the ergonomics to handle our pause target.
3472 // It is unclear why this should not include the full pause. We will
3473 // investigate this in CR 7178365.
3474 double sample_end_time_sec = os::elapsedTime();
3475 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3476 size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3477 g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned);
3478
3479 evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
3480 evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3481
3482 MemoryService::track_memory_usage();
3483
3484 // In prepare_for_verify() below we'll need to scan the deferred
3485 // update buffers to bring the RSets up-to-date if
3486 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3487 // the update buffers we'll probably need to scan cards on the
3488 // regions we just allocated to (i.e., the GC alloc
3489 // regions). However, during the last GC we called
3490 // set_saved_mark() on all the GC alloc regions, so card
3491 // scanning might skip the [saved_mark_word()...top()] area of
3492 // those regions (i.e., the area we allocated objects into
3493 // during the last GC). But it shouldn't. Given that
3494 // saved_mark_word() is conditional on whether the GC time stamp
3495 // on the region is current or not, by incrementing the GC time
3496 // stamp here we invalidate all the GC time stamps on all the
3497 // regions and saved_mark_word() will simply return top() for
3510 heap_region_iterate(&v_cl);
3511 }
3512
3513 _verifier->verify_after_gc();
3514 _verifier->check_bitmaps("GC End");
3515
3516 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3517 ref_processor_stw()->verify_no_references_recorded();
3518
3519 // CM reference discovery will be re-enabled if necessary.
3520 }
3521
3522 #ifdef TRACESPINNING
3523 ParallelTaskTerminator::print_termination_counts();
3524 #endif
3525
3526 gc_epilogue(false);
3527 }
3528
3529 // Print the remainder of the GC log output.
3530 log_gc_footer();
3531
3532 // It is not yet safe to tell the concurrent mark to
3533 // start, as we have some optional output below. We don't want the
3534 // output from the concurrent mark thread interfering with this
3535 // logging output either.
3536
3537 _hrm.verify_optional();
3538 _verifier->verify_region_sets_optional();
3539
3540 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3541 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
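// The TASKQUEUE_STATS_ONLY macro expands to nothing unless
// TASKQUEUE_STATS is defined (see the #ifdef'd helpers above).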
3542
3543 print_heap_after_gc();
3544 trace_heap_after_gc(_gc_tracer_stw);
3545
3546 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3547 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3548 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3549 // before any GC notifications are raised.
3550 g1mm()->update_sizes();
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "classfile/symbolTable.hpp"
29 #include "code/codeCache.hpp"
30 #include "code/icBuffer.hpp"
31 #include "gc/g1/bufferingOopClosure.hpp"
32 #include "gc/g1/concurrentG1Refine.hpp"
33 #include "gc/g1/concurrentG1RefineThread.hpp"
34 #include "gc/g1/concurrentMarkThread.inline.hpp"
35 #include "gc/g1/g1Allocator.inline.hpp"
36 #include "gc/g1/g1CollectedHeap.inline.hpp"
37 #include "gc/g1/g1CollectorPolicy.hpp"
38 #include "gc/g1/g1CollectorState.hpp"
39 #include "gc/g1/g1EvacStats.inline.hpp"
40 #include "gc/g1/g1GCPhaseTimes.hpp"
41 #include "gc/g1/g1HeapTransition.hpp"
42 #include "gc/g1/g1HeapVerifier.hpp"
43 #include "gc/g1/g1MarkSweep.hpp"
44 #include "gc/g1/g1OopClosures.inline.hpp"
45 #include "gc/g1/g1ParScanThreadState.inline.hpp"
46 #include "gc/g1/g1RegionToSpaceMapper.hpp"
47 #include "gc/g1/g1RemSet.inline.hpp"
48 #include "gc/g1/g1RootClosures.hpp"
49 #include "gc/g1/g1RootProcessor.hpp"
50 #include "gc/g1/g1StringDedup.hpp"
51 #include "gc/g1/g1YCTypes.hpp"
52 #include "gc/g1/heapRegion.inline.hpp"
53 #include "gc/g1/heapRegionRemSet.hpp"
54 #include "gc/g1/heapRegionSet.inline.hpp"
55 #include "gc/g1/suspendibleThreadSet.hpp"
56 #include "gc/g1/vm_operations_g1.hpp"
57 #include "gc/shared/gcHeapSummary.hpp"
58 #include "gc/shared/gcId.hpp"
59 #include "gc/shared/gcLocker.inline.hpp"
60 #include "gc/shared/gcTimer.hpp"
61 #include "gc/shared/gcTrace.hpp"
1234
1235 _verifier->verify_region_sets_optional();
1236
1237 const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1238 collector_policy()->should_clear_all_soft_refs();
1239
1240 ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
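// ClearedAllSoftRefs is a stack-allocated helper: if the flag it was
// constructed with is set, its destructor notifies the collector policy
// that every soft reference was cleared by this collection.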
1241
1242 {
1243 IsGCActiveMark x;
1244
1245 // Timing
1246 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
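// A user-requested cause (e.g. System.gc()) must always arrive here
// flagged as an explicit GC.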
1247 GCTraceCPUTime tcpu;
1248
1249 {
1250 GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1251 TraceCollectorStats tcs(g1mm()->full_collection_counters());
1252 TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1253
1254 G1HeapTransition heap_transition(this);
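// G1HeapTransition records the heap state at construction time;
// heap_transition.print() further down logs the resulting
// before/after transition for this pause.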
1255 g1_policy()->record_full_collection_start();
1256
1257 // Note: When we have a more flexible GC logging framework that
1258 // allows us to add optional attributes to a GC log record, we
1259 // could consider timing and reporting how long we wait in the
1260 // following two methods.
1261 wait_while_free_regions_coming();
1262 // If we start the compaction before the CM threads finish
1263 // scanning the root regions we might trip them over as we'll
1264 // be moving objects / updating references. So let's wait until
1265 // they are done. By telling them to abort, they should complete
1266 // early.
1267 _cm->root_regions()->abort();
1268 _cm->root_regions()->wait_until_scan_finished();
1269 append_secondary_free_list_if_not_empty_with_lock();
1270
1271 gc_prologue(true);
1272 increment_total_collections(true /* full gc */);
1273 increment_old_marking_cycles_started();
1274
1427 }
1428 _verifier->check_bitmaps("Full GC End");
1429
1430 // Start a new incremental collection set for the next pause
1431 assert(g1_policy()->collection_set() == NULL, "must be");
1432 g1_policy()->start_incremental_cset_building();
1433
1434 clear_cset_fast_test();
1435
1436 _allocator->init_mutator_alloc_region();
1437
1438 g1_policy()->record_full_collection_end();
1439
1440 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1441 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1442 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1443 // before any GC notifications are raised.
1444 g1mm()->update_sizes();
1445
1446 gc_epilogue(true);
1447
1448 heap_transition.print();
1449
1450 print_heap_after_gc();
1451 trace_heap_after_gc(gc_tracer);
1452
1453 post_full_gc_dump(gc_timer);
1454 }
1455
1456 gc_timer->register_gc_end();
1457 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1458 }
1459
1460 return true;
1461 }
1462
1463 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1464 // Currently, there is no facility in the do_full_collection(bool) API to notify
1465 // the caller that the collection did not succeed (e.g., because it was locked
1466 // out by the GC locker). So, right now, we'll ignore the return value.
1467 bool dummy = do_full_collection(true, /* explicit_gc */
1468 clear_all_soft_refs);
1469 }
1470
1471 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1472 // Include bytes that will be pre-allocated to support collections, as "used".
1473 const size_t used_after_gc = used();
1474 const size_t capacity_after_gc = capacity();
3154
3155 TaskQueueStats totals;
3156 const uint n = num_task_queues();
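// One line per task queue, accumulating into a grand total that is
// printed (and, in debug builds, verified) after the loop.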
3157 for (uint i = 0; i < n; ++i) {
3158 st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3159 totals += task_queue(i)->stats;
3160 }
3161 st->print_raw("tot "); totals.print(st); st->cr();
3162
3163 DEBUG_ONLY(totals.verify());
3164 }
3165
3166 void G1CollectedHeap::reset_taskqueue_stats() {
3167 const uint n = num_task_queues();
3168 for (uint i = 0; i < n; ++i) {
3169 task_queue(i)->stats.reset();
3170 }
3171 }
3172 #endif // TASKQUEUE_STATS
3173
3174 void G1CollectedHeap::wait_for_root_region_scanning() {
3175 double scan_wait_start = os::elapsedTime();
3176 // We have to wait until the CM threads finish scanning the
3177 // root regions as it's the only way to ensure that all the
3178 // objects on them have been correctly scanned before we start
3179 // moving them during the GC.
3180 bool waited = _cm->root_regions()->wait_until_scan_finished();
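// wait_until_scan_finished() reports whether we actually had to block;
// only in that case do we charge the elapsed time to this phase.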
3181 double wait_time_ms = 0.0;
3182 if (waited) {
3183 double scan_wait_end = os::elapsedTime();
3184 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3185 }
3186 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3187 }
3188
3189 bool
3190 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3191 assert_at_safepoint(true /* should_be_vm_thread */);
3192 guarantee(!is_gc_active(), "collection is not reentrant");
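// Note: unlike assert(), guarantee() is also checked in product builds.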
3193
3255 } else {
3256 gc_string.append("Mixed");
3257 }
3258 GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3259
3260 g1_policy()->note_gc_start(active_workers);
3261
3262 TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3263 TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3264
3265 // If the secondary_free_list is not empty, append it to the
3266 // free_list. No need to wait for the cleanup operation to finish;
3267 // the region allocation code will check the secondary_free_list
3268 // and wait if necessary. If the G1StressConcRegionFreeing flag is
3269 // set, skip this step so that the region allocation code has to
3270 // get entries from the secondary_free_list.
3271 if (!G1StressConcRegionFreeing) {
3272 append_secondary_free_list_if_not_empty_with_lock();
3273 }
3274
3275 G1HeapTransition heap_transition(this);
3276 size_t heap_used_bytes_before_gc = used();
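// Sample the overall heap usage up front; it is handed to
// record_collection_pause_end() once the pause is over.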
3277
3278 assert(check_young_list_well_formed(), "young list should be well formed");
3279
3280 // Don't dynamically change the number of GC threads this early. A value of
3281 // 0 is used to indicate serial work. When parallel work is done,
3282 // it will be set.
3283
3284 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3285 IsGCActiveMark x;
3286
3287 gc_prologue(false);
3288 increment_total_collections(false /* full gc */);
3289 increment_gc_time_stamp();
3290
3291 if (VerifyRememberedSets) {
3292 log_info(gc, verify)("[Verifying RemSets before GC]");
3293 VerifyRegionRemSetClosure v_cl;
3294 heap_region_iterate(&v_cl);
3295 }
3296
3297 _verifier->verify_before_gc();
3451 // expansion_amount() does this when it returns a value > 0.
3452 double expand_ms;
3453 if (!expand(expand_bytes, &expand_ms)) {
3454 // We failed to expand the heap. Cannot do anything about it.
3455 }
3456 g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3457 }
3458 }
3459
3460 // We redo the verification, but now with respect to the new CSet, which
3461 // has just been initialized after the previous CSet was freed.
3462 _cm->verify_no_cset_oops();
3463 _cm->note_end_of_gc();
3464
3465 // This timing is only used by the ergonomics to handle our pause target.
3466 // It is unclear why this should not include the full pause. We will
3467 // investigate this in CR 7178365.
3468 double sample_end_time_sec = os::elapsedTime();
3469 double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3470 size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3471 g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3472
3473 evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
3474 evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3475
3476 MemoryService::track_memory_usage();
3477
3478 // In prepare_for_verify() below we'll need to scan the deferred
3479 // update buffers to bring the RSets up-to-date if
3480 // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3481 // the update buffers we'll probably need to scan cards on the
3482 // regions we just allocated to (i.e., the GC alloc
3483 // regions). However, during the last GC we called
3484 // set_saved_mark() on all the GC alloc regions, so card
3485 // scanning might skip the [saved_mark_word()...top()] area of
3486 // those regions (i.e., the area we allocated objects into
3487 // during the last GC). But it shouldn't. Given that
3488 // saved_mark_word() is conditional on whether the GC time stamp
3489 // on the region is current or not, by incrementing the GC time
3490 // stamp here we invalidate all the GC time stamps on all the
3491 // regions and saved_mark_word() will simply return top() for
3504 heap_region_iterate(&v_cl);
3505 }
3506
3507 _verifier->verify_after_gc();
3508 _verifier->check_bitmaps("GC End");
3509
3510 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3511 ref_processor_stw()->verify_no_references_recorded();
3512
3513 // CM reference discovery will be re-enabled if necessary.
3514 }
3515
3516 #ifdef TRACESPINNING
3517 ParallelTaskTerminator::print_termination_counts();
3518 #endif
3519
3520 gc_epilogue(false);
3521 }
3522
3523 // Print the remainder of the GC log output.
3524 if (evacuation_failed()) {
3525 log_info(gc)("To-space exhausted");
3526 }
3527
3528 g1_policy()->print_phases();
3529 heap_transition.print();
3530
3531 // It is not yet safe to tell the concurrent mark to
3532 // start, as we have some optional output below. We don't want the
3533 // output from the concurrent mark thread interfering with this
3534 // logging output either.
3535
3536 _hrm.verify_optional();
3537 _verifier->verify_region_sets_optional();
3538
3539 TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3540 TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
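// The TASKQUEUE_STATS_ONLY macro expands to nothing unless
// TASKQUEUE_STATS is defined (see the #ifdef'd helpers above).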
3541
3542 print_heap_after_gc();
3543 trace_heap_after_gc(_gc_tracer_stw);
3544
3545 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3546 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3547 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3548 // before any GC notifications are raised.
3549 g1mm()->update_sizes();