
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 12772 : [mq]: 8177453-start-new-collection-set-measured-at-wrong-location
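
The two pairs of hunks below are the old and new versions of the affected code. The change relocates the wall-clock measurement taken around start_new_collection_set(): in one version the call is timed on the full-GC path (around line 1381) and made untimed at the end of the evacuation pause (around line 3215); in the other, the timing wraps the call in the evacuation pause (lines 3213-3215) and the full-GC path calls it untimed. Judging by the bug synopsis, the intent is to record the duration where the work is actually part of the measured pause. A minimal sketch of the measurement idiom involved, using only names that appear in the diff (the enclosing G1CollectedHeap member-function context is assumed, not shown by the webrev):

      double start = os::elapsedTime();   // wall-clock seconds before the phase
      start_new_collection_set();         // the work being timed
      // Record the elapsed time in milliseconds alongside the other pause phase times.
      g1_policy()->phase_times()->record_start_new_cset_time_ms(
          (os::elapsedTime() - start) * 1000.0);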


1361       increment_old_marking_cycles_completed(false /* concurrent */);
1362 
1363       _hrm.verify_optional();
1364       _verifier->verify_region_sets_optional();
1365 
1366       _verifier->verify_after_gc();
1367 
1368       // Clear the previous marking bitmap, if needed for bitmap verification.
1369       // Note we cannot do this when we clear the next marking bitmap in
1370       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371       // objects marked during a full GC against the previous bitmap.
1372       // But we need to clear it before calling check_bitmaps below since
1373       // the full GC has compacted objects and updated TAMS but not updated
1374       // the prev bitmap.
1375       if (G1VerifyBitmaps) {
1376         GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1377         _cm->clear_prev_bitmap(workers());
1378       }
1379       _verifier->check_bitmaps("Full GC End");
1380 
1381       double start = os::elapsedTime();
1382       start_new_collection_set();
1383       g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
1384 
1385       _allocator->init_mutator_alloc_region();
1386 
1387       g1_policy()->record_full_collection_end();
1388 
1389       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1390       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1391       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1392       // before any GC notifications are raised.
1393       g1mm()->update_sizes();
1394 
1395       gc_epilogue(true);
1396 
1397       heap_transition.print();
1398 
1399       print_heap_after_gc();
1400       print_heap_regions();
1401       trace_heap_after_gc(gc_tracer);
1402 
1403       post_full_gc_dump(gc_timer);


3195         // Initialize the GC alloc regions.
3196         _allocator->init_gc_alloc_regions(evacuation_info);
3197 
3198         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3199         pre_evacuate_collection_set();
3200 
3201         // Actually do the work...
3202         evacuate_collection_set(evacuation_info, &per_thread_states);
3203 
3204         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3205 
3206         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3207         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3208 
3209         eagerly_reclaim_humongous_regions();
3210 
3211         record_obj_copy_mem_stats();
3212         _survivor_evac_stats.adjust_desired_plab_sz();
3213         _old_evac_stats.adjust_desired_plab_sz();
3214 

3215         start_new_collection_set();

3216 
3217         if (evacuation_failed()) {
3218           set_used(recalculate_used());
3219           if (_archive_allocator != NULL) {
3220             _archive_allocator->clear_used();
3221           }
3222           for (uint i = 0; i < ParallelGCThreads; i++) {
3223             if (_evacuation_failed_info_array[i].has_failed()) {
3224               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3225             }
3226           }
3227         } else {
3228         // The "used" of the collection set have already been subtracted
3229           // when they were freed.  Add in the bytes evacuated.
3230           increase_used(g1_policy()->bytes_copied_during_gc());
3231         }
3232 
3233         if (collector_state()->during_initial_mark_pause()) {
3234           // We have to do this before we notify the CM threads that
3235           // they can start working to make sure that all the




1361       increment_old_marking_cycles_completed(false /* concurrent */);
1362 
1363       _hrm.verify_optional();
1364       _verifier->verify_region_sets_optional();
1365 
1366       _verifier->verify_after_gc();
1367 
1368       // Clear the previous marking bitmap, if needed for bitmap verification.
1369       // Note we cannot do this when we clear the next marking bitmap in
1370       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371       // objects marked during a full GC against the previous bitmap.
1372       // But we need to clear it before calling check_bitmaps below since
1373       // the full GC has compacted objects and updated TAMS but not updated
1374       // the prev bitmap.
1375       if (G1VerifyBitmaps) {
1376         GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1377         _cm->clear_prev_bitmap(workers());
1378       }
1379       _verifier->check_bitmaps("Full GC End");
1380 

1381       start_new_collection_set();

1382 
1383       _allocator->init_mutator_alloc_region();
1384 
1385       g1_policy()->record_full_collection_end();
1386 
1387       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1388       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1389       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1390       // before any GC notifications are raised.
1391       g1mm()->update_sizes();
1392 
1393       gc_epilogue(true);
1394 
1395       heap_transition.print();
1396 
1397       print_heap_after_gc();
1398       print_heap_regions();
1399       trace_heap_after_gc(gc_tracer);
1400 
1401       post_full_gc_dump(gc_timer);


3193         // Initialize the GC alloc regions.
3194         _allocator->init_gc_alloc_regions(evacuation_info);
3195 
3196         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3197         pre_evacuate_collection_set();
3198 
3199         // Actually do the work...
3200         evacuate_collection_set(evacuation_info, &per_thread_states);
3201 
3202         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3203 
3204         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3205         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3206 
3207         eagerly_reclaim_humongous_regions();
3208 
3209         record_obj_copy_mem_stats();
3210         _survivor_evac_stats.adjust_desired_plab_sz();
3211         _old_evac_stats.adjust_desired_plab_sz();
3212 
3213         double start = os::elapsedTime();
3214         start_new_collection_set();
3215         g1_policy()->phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
3216 
3217         if (evacuation_failed()) {
3218           set_used(recalculate_used());
3219           if (_archive_allocator != NULL) {
3220             _archive_allocator->clear_used();
3221           }
3222           for (uint i = 0; i < ParallelGCThreads; i++) {
3223             if (_evacuation_failed_info_array[i].has_failed()) {
3224               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3225             }
3226           }
3227         } else {
3228         // The "used" of the collection set have already been subtracted
3229           // when they were freed.  Add in the bytes evacuated.
3230           increase_used(g1_policy()->bytes_copied_during_gc());
3231         }
3232 
3233         if (collector_state()->during_initial_mark_pause()) {
3234           // We have to do this before we notify the CM threads that
3235           // they can start working to make sure that all the
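
Placement is what matters in this change: g1_policy()->phase_times() collects the per-pause phase timings, so wrapping the start_new_collection_set() call that runs inside the evacuation pause keeps the recorded value attributed to that pause, while timing the call on the full-GC path would not (this reading of the motivation follows the bug synopsis, not anything stated in the webrev itself). The (os::elapsedTime() - start) * 1000.0 expression converts the elapsed seconds to milliseconds, matching the _time_ms suffix of the recorder.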

