
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 9408 : [mq]: rev.01


1184       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1185     } else if (hr->is_archive()) {
1186       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1187     } else if (hr->is_old()) {
1188       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1189     } else {
1190       ShouldNotReachHere();
1191     }
1192     return false;
1193   }
1194 
1195   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1196     : _hr_printer(hr_printer) { }
1197 };
1198 
1199 void G1CollectedHeap::print_hrm_post_compaction() {
1200   PostCompactionPrinterClosure cl(hr_printer());
1201   heap_region_iterate(&cl);
1202 }
1203 
1204 bool G1CollectedHeap::do_collection(bool explicit_gc,
1205                                     bool clear_all_soft_refs,
1206                                     size_t word_size) {
1207   assert_at_safepoint(true /* should_be_vm_thread */);
1208 
1209   if (GC_locker::check_active_before_gc()) {
1210     return false;
1211   }
1212 
1213   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1214   gc_timer->register_gc_start();
1215 
1216   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1217   GCIdMark gc_id_mark;
1218   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1219 
1220   SvcGCMarker sgcm(SvcGCMarker::FULL);
1221   ResourceMark rm;
1222 
1223   G1Log::update_level();
1224   print_heap_before_gc();
1225   trace_heap_before_gc(gc_tracer);
1226 


1344       ref_processor_stw()->verify_no_references_recorded();
1345 
1346       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1347       ClassLoaderDataGraph::purge();
1348       MetaspaceAux::verify_metrics();
1349 
1350       // Note: since we've just done a full GC, concurrent
1351       // marking is no longer active. Therefore we need not
1352       // re-enable reference discovery for the CM ref processor.
1353       // That will be done at the start of the next marking cycle.
1354       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1355       ref_processor_cm()->verify_no_references_recorded();
1356 
1357       reset_gc_time_stamp();
1358       // Since everything potentially moved, we will clear all remembered
1359       // sets, and clear all cards.  Later we will rebuild remembered
1360       // sets. We will also reset the GC time stamps of the regions.
1361       clear_rsets_post_compaction();
1362       check_gc_time_stamps();
1363 
1364       // Resize the heap if necessary.
1365       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1366 
1367       if (_hr_printer.is_active()) {
1368         // We should do this after we potentially resize the heap so
1369         // that all the COMMIT / UNCOMMIT events are generated before
1370         // the end GC event.
1371 
1372         print_hrm_post_compaction();
1373         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1374       }
1375 
1376       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1377       if (hot_card_cache->use_cache()) {
1378         hot_card_cache->reset_card_counts();
1379         hot_card_cache->reset_hot_cache();
1380       }
1381 
1382       // Rebuild remembered sets of all regions.
1383       uint n_workers =
1384         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1385                                                 workers()->active_workers(),


1453       gc_epilogue(true);
1454     }
1455 
1456     if (G1Log::finer()) {
1457       g1_policy()->print_detailed_heap_transition(true /* full */);
1458     }
1459 
1460     print_heap_after_gc();
1461     trace_heap_after_gc(gc_tracer);
1462 
1463     post_full_gc_dump(gc_timer);
1464 
1465     gc_timer->register_gc_end();
1466     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1467   }
1468 
1469   return true;
1470 }
1471 
1472 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1473   // do_collection() will return whether it succeeded in performing
1474   // the GC. Currently, there is no facility on the
1475   // do_full_collection() API to notify the caller that the collection
1476   // did not succeed (e.g., because it was locked out by the GC
1477   // locker). So, right now, we'll ignore the return value.
1478   bool dummy = do_collection(true,                /* explicit_gc */
1479                              clear_all_soft_refs,
1480                              0                    /* word_size */);
1481 }
1482 
1483 // This code is mostly copied from TenuredGeneration.
1484 void
1485 G1CollectedHeap::
1486 resize_if_necessary_after_full_collection(size_t word_size) {
1487   // Include the current allocation, if any, and bytes that will be
1488   // pre-allocated to support collections, as "used".
1489   const size_t used_after_gc = used();
1490   const size_t capacity_after_gc = capacity();
1491   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1492 
1493   // This is enforced in arguments.cpp.
1494   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1495          "otherwise the code below doesn't make sense");
1496 
1497   // We don't have floating point command-line arguments
1498   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1499   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1500   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1501   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1502 
1503   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1504   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1505 
1506   // We have to be careful here as these two calculations can overflow
1507   // 32-bit size_t's.
1508   double used_after_gc_d = (double) used_after_gc;


1580     attempt_allocation_at_safepoint(word_size,
1581                                     context,
1582                                     expect_null_mutator_alloc_region);
1583   if (result != NULL) {
1584     assert(*gc_succeeded, "sanity");
1585     return result;
1586   }
1587 
1588   // In a G1 heap, we're supposed to keep allocation from failing by
1589   // incremental pauses.  Therefore, at least for now, we'll favor
1590   // expansion over collection.  (This might change in the future if we can
1591   // do something smarter than full collection to satisfy a failed alloc.)
1592   result = expand_and_allocate(word_size, context);
1593   if (result != NULL) {
1594     assert(*gc_succeeded, "sanity");
1595     return result;
1596   }
1597 
1598   if (do_gc) {
1599     // Expansion didn't work, we'll try to do a Full GC.
1600     *gc_succeeded = do_collection(false, /* explicit_gc */
1601                                   clear_all_soft_refs,
1602                                   word_size);
1603   }
1604 
1605   return NULL;
1606 }
1607 
1608 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1609                                                      AllocationContext_t context,
1610                                                      bool* succeeded) {
1611   assert_at_safepoint(true /* should_be_vm_thread */);
1612 
1613   // Attempts to allocate followed by Full GC.
1614   HeapWord* result =
1615     satisfy_failed_allocation_helper(word_size,
1616                                      context,
1617                                      true,  /* do_gc */
1618                                      false, /* clear_all_soft_refs */
1619                                      false, /* expect_null_mutator_alloc_region */
1620                                      succeeded);
1621 
1622   if (result != NULL || !*succeeded) {




1184       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1185     } else if (hr->is_archive()) {
1186       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1187     } else if (hr->is_old()) {
1188       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1189     } else {
1190       ShouldNotReachHere();
1191     }
1192     return false;
1193   }
1194 
1195   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1196     : _hr_printer(hr_printer) { }
1197 };
1198 
1199 void G1CollectedHeap::print_hrm_post_compaction() {
1200   PostCompactionPrinterClosure cl(hr_printer());
1201   heap_region_iterate(&cl);
1202 }
1203 
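PostCompactionPrinterClosure above is an instance of G1's usual region-iteration pattern: a HeapRegionClosure subclass is handed to heap_region_iterate(), which calls doHeapRegion() once per region, and returning false keeps the walk going. The sketch below illustrates the same pattern with a hypothetical closure (ByTypeCounter and its fields are made up for illustration; only the HeapRegionClosure / doHeapRegion shape comes from the code above).

    // Hedged sketch of the HeapRegionClosure pattern used above.
    // ByTypeCounter is hypothetical; it simply counts regions by type.
    class ByTypeCounter : public HeapRegionClosure {
      size_t _old;
      size_t _humongous;
    public:
      ByTypeCounter() : _old(0), _humongous(0) { }

      bool doHeapRegion(HeapRegion* hr) {
        if (hr->is_old()) {
          _old++;
        } else if (hr->is_humongous()) {
          _humongous++;
        }
        return false; // false means "keep iterating"
      }

      size_t old_regions() const       { return _old; }
      size_t humongous_regions() const { return _humongous; }
    };

    // Usage, mirroring print_hrm_post_compaction():
    //   ByTypeCounter cl;
    //   heap_region_iterate(&cl);
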
1204 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1205                                          bool clear_all_soft_refs) {

1206   assert_at_safepoint(true /* should_be_vm_thread */);
1207 
1208   if (GC_locker::check_active_before_gc()) {
1209     return false;
1210   }
1211 
1212   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1213   gc_timer->register_gc_start();
1214 
1215   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1216   GCIdMark gc_id_mark;
1217   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1218 
1219   SvcGCMarker sgcm(SvcGCMarker::FULL);
1220   ResourceMark rm;
1221 
1222   G1Log::update_level();
1223   print_heap_before_gc();
1224   trace_heap_before_gc(gc_tracer);
1225 
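The setup at lines 1208-1217 pairs with the teardown at lines 1463-1464 further down: if the GC locker is active the method returns before any timing or tracing state is touched; otherwise the start events reported here must be matched by the corresponding end events once the collection finishes. A comment-only skeleton of that pairing, taken from the lines shown in this excerpt, is:

    // Skeleton of the timing/tracing protocol in do_full_collection()
    // (illustrative only; the actual collection work happens in between):
    //
    //   if (GC_locker::check_active_before_gc()) {
    //     return false;                       // bail out before anything is started
    //   }
    //   gc_timer->register_gc_start();
    //   GCIdMark gc_id_mark;                  // makes a fresh GC id current for logging/tracing
    //   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
    //
    //   ... perform the Full GC ...
    //
    //   gc_timer->register_gc_end();
    //   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
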


1343       ref_processor_stw()->verify_no_references_recorded();
1344 
1345       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1346       ClassLoaderDataGraph::purge();
1347       MetaspaceAux::verify_metrics();
1348 
1349       // Note: since we've just done a full GC, concurrent
1350       // marking is no longer active. Therefore we need not
1351       // re-enable reference discovery for the CM ref processor.
1352       // That will be done at the start of the next marking cycle.
1353       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1354       ref_processor_cm()->verify_no_references_recorded();
1355 
1356       reset_gc_time_stamp();
1357       // Since everything potentially moved, we will clear all remembered
1358       // sets, and clear all cards.  Later we will rebuild remembered
1359       // sets. We will also reset the GC time stamps of the regions.
1360       clear_rsets_post_compaction();
1361       check_gc_time_stamps();
1362 
1363       resize_if_necessary_after_full_collection();

1364 
1365       if (_hr_printer.is_active()) {
1366         // We should do this after we potentially resize the heap so
1367         // that all the COMMIT / UNCOMMIT events are generated before
1368         // the end GC event.
1369 
1370         print_hrm_post_compaction();
1371         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1372       }
1373 
1374       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1375       if (hot_card_cache->use_cache()) {
1376         hot_card_cache->reset_card_counts();
1377         hot_card_cache->reset_hot_cache();
1378       }
1379 
1380       // Rebuild remembered sets of all regions.
1381       uint n_workers =
1382         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1383                                                 workers()->active_workers(),


1451       gc_epilogue(true);
1452     }
1453 
1454     if (G1Log::finer()) {
1455       g1_policy()->print_detailed_heap_transition(true /* full */);
1456     }
1457 
1458     print_heap_after_gc();
1459     trace_heap_after_gc(gc_tracer);
1460 
1461     post_full_gc_dump(gc_timer);
1462 
1463     gc_timer->register_gc_end();
1464     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1465   }
1466 
1467   return true;
1468 }
1469 
1470 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1471   // Currently, there is no facility in the do_full_collection(bool) API to notify
1472   // the caller that the collection did not succeed (e.g., because it was locked
1473   // out by the GC locker). So, right now, we'll ignore the return value.
1474   bool dummy = do_full_collection(true,                /* explicit_gc */
1475                                   clear_all_soft_refs);



1476 }
1477 
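As the comment says, the single-argument wrapper throws the success flag away. A caller that does care whether the Full GC was locked out can use the two-argument overload and test its result; the sketch below is hypothetical (the member function name and the reaction to failure are made up), and only the do_full_collection(bool, bool) call itself comes from the code above.

    // Hedged sketch: a hypothetical G1CollectedHeap member that, unlike the
    // wrapper above, inspects the success flag of the two-argument overload.
    void G1CollectedHeap::full_collection_with_check(bool clear_all_soft_refs) {
      bool succeeded = do_full_collection(true /* explicit_gc */,
                                          clear_all_soft_refs);
      if (!succeeded) {
        // The GC locker was active, so the collection did not run; a real
        // caller would decide here whether to stall and retry later.
      }
    }
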
1478 // This code is mostly copied from CardGeneration::compute_new_size.
1479 void
1480 G1CollectedHeap::
1481 resize_if_necessary_after_full_collection() {
1482   // Include bytes that will be pre-allocated to support collections, as "used".

1483   const size_t used_after_gc = used();
1484   const size_t capacity_after_gc = capacity();
1485   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1486 
1487   // This is enforced in arguments.cpp.
1488   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1489          "otherwise the code below doesn't make sense");
1490 
1491   // We don't have floating point command-line arguments
1492   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1493   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1494   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1495   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1496 
1497   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1498   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1499 
1500   // We have to be careful here as these two calculations can overflow
1501   // 32-bit size_t's.
1502   double used_after_gc_d = (double) used_after_gc;
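The ratios computed above turn MinHeapFreeRatio / MaxHeapFreeRatio into a desired capacity window around the live data left after the Full GC; the rest of the method (not shown in this excerpt) then expands or shrinks the heap toward that window. As a worked example under assumed values: with MinHeapFreeRatio = 40, MaxHeapFreeRatio = 70 and 600 MB used after GC, the heap wants at least 600 / (1 - 0.40) = 1000 MB and at most 600 / (1 - 0.70) = 2000 MB committed, clamped to -Xms / -Xmx. The standalone sketch below reproduces just that arithmetic; variable names mirror the excerpt, and the clamping order is an assumption.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Hedged sketch of the sizing arithmetic only; the real method goes on
    // to expand() or shrink() the heap, which is outside this excerpt.
    int main() {
      const size_t M = 1024 * 1024;
      const size_t used_after_gc = 600 * M;    // example live data after the Full GC
      const size_t min_heap_size = 512 * M;    // assumed -Xms
      const size_t max_heap_size = 2048 * M;   // assumed -Xmx
      const unsigned MinHeapFreeRatio = 40;    // example flag values
      const unsigned MaxHeapFreeRatio = 70;

      const double maximum_used_percentage = 1.0 - MinHeapFreeRatio / 100.0;
      const double minimum_used_percentage = 1.0 - MaxHeapFreeRatio / 100.0;

      // Double arithmetic, as in the excerpt, so the divisions cannot
      // overflow a 32-bit size_t.
      const double used_after_gc_d = (double) used_after_gc;
      double minimum_desired = used_after_gc_d / maximum_used_percentage;
      double maximum_desired = used_after_gc_d / minimum_used_percentage;

      minimum_desired = std::max(minimum_desired, (double) min_heap_size);
      maximum_desired = std::min(maximum_desired, (double) max_heap_size);

      printf("desired capacity window: [%.0f MB, %.0f MB]\n",
             minimum_desired / M, maximum_desired / M);
      return 0;
    }
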


1574     attempt_allocation_at_safepoint(word_size,
1575                                     context,
1576                                     expect_null_mutator_alloc_region);
1577   if (result != NULL) {
1578     assert(*gc_succeeded, "sanity");
1579     return result;
1580   }
1581 
1582   // In a G1 heap, we're supposed to keep allocation from failing by
1583   // incremental pauses.  Therefore, at least for now, we'll favor
1584   // expansion over collection.  (This might change in the future if we can
1585   // do something smarter than full collection to satisfy a failed alloc.)
1586   result = expand_and_allocate(word_size, context);
1587   if (result != NULL) {
1588     assert(*gc_succeeded, "sanity");
1589     return result;
1590   }
1591 
1592   if (do_gc) {
1593     // Expansion didn't work, we'll try to do a Full GC.
1594     *gc_succeeded = do_full_collection(false, /* explicit_gc */
1595                                        clear_all_soft_refs);

1596   }
1597 
1598   return NULL;
1599 }
1600 
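The helper above encodes the recovery order for a failed mutator allocation: retry the allocation at the safepoint, prefer expanding the heap, and only then fall back to a Full GC, returning NULL so the caller can retry afterwards. Its public entry point, satisfy_failed_allocation() (declared just below), returns the recovered allocation or NULL and reports via *succeeded whether the recovery attempt went through, so callers must check both. The sketch below shows one hypothetical way a caller might consume it; only the (word_size, context, &succeeded) signature is taken from the declaration below, the surrounding function and retry policy are made up.

    // Hedged sketch of a hypothetical caller of satisfy_failed_allocation().
    HeapWord* allocate_after_failure_sketch(G1CollectedHeap* g1h,
                                            size_t word_size,
                                            AllocationContext_t context) {
      bool succeeded = false;
      HeapWord* result = g1h->satisfy_failed_allocation(word_size, context, &succeeded);
      if (result != NULL) {
        return result;      // recovered, possibly after expansion or a Full GC
      }
      if (!succeeded) {
        return NULL;        // the GC locker blocked the Full GC; caller must stall and retry
      }
      return NULL;          // GC ran, but the request still cannot be satisfied
    }
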
1601 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1602                                                      AllocationContext_t context,
1603                                                      bool* succeeded) {
1604   assert_at_safepoint(true /* should_be_vm_thread */);
1605 
1606   // Attempts to allocate followed by Full GC.
1607   HeapWord* result =
1608     satisfy_failed_allocation_helper(word_size,
1609                                      context,
1610                                      true,  /* do_gc */
1611                                      false, /* clear_all_soft_refs */
1612                                      false, /* expect_null_mutator_alloc_region */
1613                                      succeeded);
1614 
1615   if (result != NULL || !*succeeded) {

