1361 increment_old_marking_cycles_completed(false /* concurrent */);
1362
1363 _hrm.verify_optional();
1364 _verifier->verify_region_sets_optional();
1365
1366 _verifier->verify_after_gc();
1367
1368 // Clear the previous marking bitmap, if needed for bitmap verification.
1369 // Note we cannot do this when we clear the next marking bitmap in
1370 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371 // objects marked during a full GC against the previous bitmap.
1372 // But we need to clear it before calling check_bitmaps below since
1373 // the full GC has compacted objects and updated TAMS but not updated
1374 // the prev bitmap.
1375 if (G1VerifyBitmaps) {
1376 GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1377 _cm->clear_prev_bitmap(workers());
1378 }
1379 _verifier->check_bitmaps("Full GC End");
1380
1381 // Start a new incremental collection set for the next pause.
1382 collection_set()->start_incremental_building();
1383
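  // Reset the per-region fast-test table that lets the GC quickly decide
  // whether an address lies in the collection set.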
1384 clear_cset_fast_test();
1385
1386 _allocator->init_mutator_alloc_region();
1387
1388 g1_policy()->record_full_collection_end();
1389
1390 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1391 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1392 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1393 // before any GC notifications are raised.
1394 g1mm()->update_sizes();
1395
1396 gc_epilogue(true);
1397
1398 heap_transition.print();
1399
1400 print_heap_after_gc();
1401 print_heap_regions();
1402 trace_heap_after_gc(gc_tracer);
1403
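  // Emit any requested post-full-GC diagnostics, e.g. a heap dump or class
  // histogram when the corresponding flags are enabled.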
1404 post_full_gc_dump(gc_timer);
2677 }
2678
2679 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2680 const G1HeapSummary& heap_summary = create_g1_heap_summary();
2681 gc_tracer->report_gc_heap_summary(when, heap_summary);
2682
2683 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2684 gc_tracer->report_metaspace_summary(when, metaspace_summary);
2685 }
2686
2687 G1CollectedHeap* G1CollectedHeap::heap() {
2688 CollectedHeap* heap = Universe::heap();
2689 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2690 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
2691 return (G1CollectedHeap*)heap;
2692 }
2693
2694 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2695 // always_do_update_barrier = false;
2696 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2697 // Fill TLABs and such
2698 accumulate_statistics_all_tlabs();
2699 ensure_parsability(true);
2700
2701 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2702 }
2703
2704 void G1CollectedHeap::gc_epilogue(bool full) {
2705 // We are at the end of the GC. The total collection count has already been incremented.
2706 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2707
2708 // FIXME: what is this about?
2709 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2710 // is set.
2711 #if defined(COMPILER2) || INCLUDE_JVMCI
2712 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2713 #endif
2714 // always_do_update_barrier = true;
2715
2716 resize_all_tlabs();
2717 allocation_context_stats().update(full);
2718
2719 // We have just completed a GC. Update the soft reference
2720 // policy with the new heap occupancy.
2721 Universe::update_heap_info_at_gc();
2722 }
2723
2724 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2725 uint gc_count_before,
2726 bool* succeeded,
2727 GCCause::Cause gc_cause) {
2728 assert_heap_not_locked_and_not_at_safepoint();
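  // Package the pause request as a VM operation; VMThread::execute() returns
  // only after the operation has run at a safepoint on the VM thread.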
2729 VM_G1IncCollectionPause op(gc_count_before,
2730 word_size,
2731 false, /* should_initiate_conc_mark */
2732 g1_policy()->max_pause_time_ms(),
2733 gc_cause);
2734
2735 op.set_allocation_context(AllocationContext::current());
2736 VMThread::execute(&op);
2979 double wait_time_ms = 0.0;
2980 if (waited) {
2981 double scan_wait_end = os::elapsedTime();
2982 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2983 }
2984 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2985 }
2986
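// Helper used when printing the collection set: hands each region to the
// heap region printer. Returning false from doHeapRegion() means "keep
// iterating" over the remaining regions.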
2987 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2988 private:
2989 G1HRPrinter* _hr_printer;
2990 public:
2991 G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2992
2993 virtual bool doHeapRegion(HeapRegion* r) {
2994 _hr_printer->cset(r);
2995 return false;
2996 }
2997 };
2998
2999 bool
3000 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3001 assert_at_safepoint(true /* should_be_vm_thread */);
3002 guarantee(!is_gc_active(), "collection is not reentrant");
3003
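  // A thread inside a JNI critical region blocks GC; in that case the pause
  // is abandoned here and the GC locker will request a new one once the
  // critical region is exited.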
3004 if (GCLocker::check_active_before_gc()) {
3005 return false;
3006 }
3007
3008 _gc_timer_stw->register_gc_start();
3009
3010 GCIdMark gc_id_mark;
3011 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3012
3013 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3014 ResourceMark rm;
3015
3016 g1_policy()->note_gc_start();
3017
3018 wait_for_root_region_scanning();
3181 // Initialize the GC alloc regions.
3182 _allocator->init_gc_alloc_regions(evacuation_info);
3183
3184 G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3185 pre_evacuate_collection_set();
3186
3187 // Actually do the work...
3188 evacuate_collection_set(evacuation_info, &per_thread_states);
3189
3190 post_evacuate_collection_set(evacuation_info, &per_thread_states);
3191
3192 const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3193 free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3194
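  // Attempt to reclaim dead humongous objects right away, rather than leaving
  // them for a later concurrent cycle; candidates are chosen by G1's
  // eager-reclaim heuristics.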
3195 eagerly_reclaim_humongous_regions();
3196
3197 record_obj_copy_mem_stats();
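  // Recompute desired PLAB sizes from this pause's survivor and old-gen
  // evacuation allocation statistics.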
3198 _survivor_evac_stats.adjust_desired_plab_sz();
3199 _old_evac_stats.adjust_desired_plab_sz();
3200
3201 // Start a new incremental collection set for the next pause.
3202 collection_set()->start_incremental_building();
3203
3204 clear_cset_fast_test();
3205
3206 guarantee(_eden.length() == 0, "eden should have been cleared");
3207 g1_policy()->transfer_survivors_to_cset(survivor());
3208
3209 if (evacuation_failed()) {
3210 set_used(recalculate_used());
3211 if (_archive_allocator != NULL) {
3212 _archive_allocator->clear_used();
3213 }
3214 for (uint i = 0; i < ParallelGCThreads; i++) {
3215 if (_evacuation_failed_info_array[i].has_failed()) {
3216 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3217 }
3218 }
3219 } else {
3220 // The "used" of the the collection set have already been subtracted
3221 // when they were freed. Add in the bytes evacuated.
3222 increase_used(g1_policy()->bytes_copied_during_gc());
3223 }
3224
3225 if (collector_state()->during_initial_mark_pause()) {
3226 // We have to do this before we notify the CM threads that
3227 // they can start working to make sure that all the
4505 if (g1_policy()->should_process_references()) {
4506 enqueue_discovered_references(per_thread_states);
4507 } else {
4508 g1_policy()->phase_times()->record_ref_enq_time(0);
4509 }
4510
4511 _allocator->release_gc_alloc_regions(evacuation_info);
4512
4513 merge_per_thread_state_info(per_thread_states);
4514
4515 // Reset and re-enable the hot card cache.
4516 // Note that the counts for cards in the collection set's regions
4517 // are reset when the collection set is freed.
4518 _hot_card_cache->reset_hot_cache();
4519 _hot_card_cache->set_use_cache(true);
4520
4521 purge_code_root_memory();
4522
4523 redirty_logged_cards();
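  // Derived (interior) pointers recorded by the compilers during the pause
  // must be updated now that their base objects may have moved.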
4524 #if defined(COMPILER2) || INCLUDE_JVMCI
4525 DerivedPointerTable::update_pointers();
4526 #endif
4527 g1_policy()->print_age_table();
4528 }
4529
4530 void G1CollectedHeap::record_obj_copy_mem_stats() {
4531 g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4532
4533 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4534 create_g1_evac_summary(&_old_evac_stats));
4535 }
4536
4537 void G1CollectedHeap::free_region(HeapRegion* hr,
4538 FreeRegionList* free_list,
4539 bool skip_remset,
4540 bool skip_hot_card_cache,
4541 bool locked) {
4542 assert(!hr->is_free(), "the region should not be free");
4543 assert(!hr->is_empty(), "the region should not be empty");
4544 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
4545 assert(free_list != NULL, "pre-condition");
|
1361 increment_old_marking_cycles_completed(false /* concurrent */);
1362
1363 _hrm.verify_optional();
1364 _verifier->verify_region_sets_optional();
1365
1366 _verifier->verify_after_gc();
1367
1368 // Clear the previous marking bitmap, if needed for bitmap verification.
1369 // Note we cannot do this when we clear the next marking bitmap in
1370 // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371 // objects marked during a full GC against the previous bitmap.
1372 // But we need to clear it before calling check_bitmaps below since
1373 // the full GC has compacted objects and updated TAMS but not updated
1374 // the prev bitmap.
1375 if (G1VerifyBitmaps) {
1376 GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1377 _cm->clear_prev_bitmap(workers());
1378 }
1379 _verifier->check_bitmaps("Full GC End");
1380
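  // Rebuild the collection set for the next pause, timing the work for the
  // phase report.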
1381 double start = os::elapsedTime();
1382 start_new_collection_set();
1383 g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
1384
1385 _allocator->init_mutator_alloc_region();
1386
1387 g1_policy()->record_full_collection_end();
1388
1389 // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1390 // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1391 // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1392 // before any GC notifications are raised.
1393 g1mm()->update_sizes();
1394
1395 gc_epilogue(true);
1396
1397 heap_transition.print();
1398
1399 print_heap_after_gc();
1400 print_heap_regions();
1401 trace_heap_after_gc(gc_tracer);
1402
1403 post_full_gc_dump(gc_timer);
2676 }
2677
2678 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2679 const G1HeapSummary& heap_summary = create_g1_heap_summary();
2680 gc_tracer->report_gc_heap_summary(when, heap_summary);
2681
2682 const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2683 gc_tracer->report_metaspace_summary(when, metaspace_summary);
2684 }
2685
2686 G1CollectedHeap* G1CollectedHeap::heap() {
2687 CollectedHeap* heap = Universe::heap();
2688 assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2689 assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
2690 return (G1CollectedHeap*)heap;
2691 }
2692
2693 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2694 // always_do_update_barrier = false;
2695 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2696
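  // Flushing TLAB statistics and retiring/filling TLABs keeps the heap
  // parsable; the elapsed time is reported as the TLAB preparation phase.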
2697 double start = os::elapsedTime();
2698 // Fill TLABs and such
2699 accumulate_statistics_all_tlabs();
2700 ensure_parsability(true);
2701 g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2702
2703 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2704 }
2705
2706 void G1CollectedHeap::gc_epilogue(bool full) {
2707 // We are at the end of the GC. The total collection count has already been incremented.
2708 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2709
2710 // FIXME: what is this about?
2711 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2712 // is set.
2713 #if defined(COMPILER2) || INCLUDE_JVMCI
2714 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2715 #endif
2716 // always_do_update_barrier = true;
2717
2718 double start = os::elapsedTime();
2719 resize_all_tlabs();
2720 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2721
2722 allocation_context_stats().update(full);
2723
2724 // We have just completed a GC. Update the soft reference
2725 // policy with the new heap occupancy.
2726 Universe::update_heap_info_at_gc();
2727 }
2728
2729 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2730 uint gc_count_before,
2731 bool* succeeded,
2732 GCCause::Cause gc_cause) {
2733 assert_heap_not_locked_and_not_at_safepoint();
2734 VM_G1IncCollectionPause op(gc_count_before,
2735 word_size,
2736 false, /* should_initiate_conc_mark */
2737 g1_policy()->max_pause_time_ms(),
2738 gc_cause);
2739
2740 op.set_allocation_context(AllocationContext::current());
2741 VMThread::execute(&op);
2984 double wait_time_ms = 0.0;
2985 if (waited) {
2986 double scan_wait_end = os::elapsedTime();
2987 wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2988 }
2989 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2990 }
2991
2992 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2993 private:
2994 G1HRPrinter* _hr_printer;
2995 public:
2996 G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2997
2998 virtual bool doHeapRegion(HeapRegion* r) {
2999 _hr_printer->cset(r);
3000 return false;
3001 }
3002 };
3003
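// Common tail of every collection: restart incremental collection-set
// building, reset the in-cset fast-test table, and move this pause's
// survivors into the new collection set. Called both at the end of a
// full GC and at the end of an evacuation pause.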
3004 void G1CollectedHeap::start_new_collection_set() {
3005 collection_set()->start_incremental_building();
3006
3007 clear_cset_fast_test();
3008
3009 guarantee(_eden.length() == 0, "eden should have been cleared");
3010 g1_policy()->transfer_survivors_to_cset(survivor());
3011 }
3012
3013 bool
3014 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3015 assert_at_safepoint(true /* should_be_vm_thread */);
3016 guarantee(!is_gc_active(), "collection is not reentrant");
3017
3018 if (GCLocker::check_active_before_gc()) {
3019 return false;
3020 }
3021
3022 _gc_timer_stw->register_gc_start();
3023
3024 GCIdMark gc_id_mark;
3025 _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3026
3027 SvcGCMarker sgcm(SvcGCMarker::MINOR);
3028 ResourceMark rm;
3029
3030 g1_policy()->note_gc_start();
3031
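  // An in-flight concurrent root region scan must finish before objects can
  // be moved; any time spent waiting is recorded in the phase times.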
3032 wait_for_root_region_scanning();
3195 // Initialize the GC alloc regions.
3196 _allocator->init_gc_alloc_regions(evacuation_info);
3197
3198 G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3199 pre_evacuate_collection_set();
3200
3201 // Actually do the work...
3202 evacuate_collection_set(evacuation_info, &per_thread_states);
3203
3204 post_evacuate_collection_set(evacuation_info, &per_thread_states);
3205
3206 const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3207 free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3208
3209 eagerly_reclaim_humongous_regions();
3210
3211 record_obj_copy_mem_stats();
3212 _survivor_evac_stats.adjust_desired_plab_sz();
3213 _old_evac_stats.adjust_desired_plab_sz();
3214
3215 start_new_collection_set();
3216
3217 if (evacuation_failed()) {
3218 set_used(recalculate_used());
3219 if (_archive_allocator != NULL) {
3220 _archive_allocator->clear_used();
3221 }
3222 for (uint i = 0; i < ParallelGCThreads; i++) {
3223 if (_evacuation_failed_info_array[i].has_failed()) {
3224 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3225 }
3226 }
3227 } else {
3228 // The "used" of the the collection set have already been subtracted
3229 // when they were freed. Add in the bytes evacuated.
3230 increase_used(g1_policy()->bytes_copied_during_gc());
3231 }
3232
3233 if (collector_state()->during_initial_mark_pause()) {
3234 // We have to do this before we notify the CM threads that
3235 // they can start working to make sure that all the
4513 if (g1_policy()->should_process_references()) {
4514 enqueue_discovered_references(per_thread_states);
4515 } else {
4516 g1_policy()->phase_times()->record_ref_enq_time(0);
4517 }
4518
4519 _allocator->release_gc_alloc_regions(evacuation_info);
4520
4521 merge_per_thread_state_info(per_thread_states);
4522
4523 // Reset and re-enable the hot card cache.
4524 // Note that the counts for cards in the collection set's regions
4525 // are reset when the collection set is freed.
4526 _hot_card_cache->reset_hot_cache();
4527 _hot_card_cache->set_use_cache(true);
4528
4529 purge_code_root_memory();
4530
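  // Cards logged during the pause are redirtied so that concurrent refinement
  // will revisit them after the pause.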
4531 redirty_logged_cards();
4532 #if defined(COMPILER2) || INCLUDE_JVMCI
4533 double start = os::elapsedTime();
4534 DerivedPointerTable::update_pointers();
4535 g1_policy()->phase_times()->record_dpt_update_time((os::elapsedTime() - start) * 1000.0);
4536 #endif
4537 g1_policy()->print_age_table();
4538 }
4539
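// Reports this pause's object-copy statistics; allocated() is in heap words,
// hence the HeapWordSize scaling to bytes.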
4540 void G1CollectedHeap::record_obj_copy_mem_stats() {
4541 g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4542
4543 _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4544 create_g1_evac_summary(&_old_evac_stats));
4545 }
4546
4547 void G1CollectedHeap::free_region(HeapRegion* hr,
4548 FreeRegionList* free_list,
4549 bool skip_remset,
4550 bool skip_hot_card_cache,
4551 bool locked) {
4552 assert(!hr->is_free(), "the region should not be free");
4553 assert(!hr->is_empty(), "the region should not be empty");
4554 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
4555 assert(free_list != NULL, "pre-condition");
|