src/share/vm/gc/g1/g1CollectedHeap.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "gc/g1/bufferingOopClosure.hpp"
  31 #include "gc/g1/concurrentG1Refine.hpp"
  32 #include "gc/g1/concurrentG1RefineThread.hpp"
  33 #include "gc/g1/concurrentMarkThread.inline.hpp"
  34 #include "gc/g1/g1Allocator.inline.hpp"
  35 #include "gc/g1/g1CollectedHeap.inline.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ErgoVerbose.hpp"
  39 #include "gc/g1/g1EvacStats.inline.hpp"
  40 #include "gc/g1/g1GCPhaseTimes.hpp"
  41 #include "gc/g1/g1Log.hpp"
  42 #include "gc/g1/g1MarkSweep.hpp"
  43 #include "gc/g1/g1OopClosures.inline.hpp"
  44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  46 #include "gc/g1/g1RemSet.inline.hpp"
  47 #include "gc/g1/g1RootClosures.hpp"
  48 #include "gc/g1/g1RootProcessor.hpp"
  49 #include "gc/g1/g1StringDedup.hpp"
  50 #include "gc/g1/g1YCTypes.hpp"
  51 #include "gc/g1/heapRegion.inline.hpp"
  52 #include "gc/g1/heapRegionRemSet.hpp"
  53 #include "gc/g1/heapRegionSet.inline.hpp"
  54 #include "gc/g1/suspendibleThreadSet.hpp"
  55 #include "gc/g1/vm_operations_g1.hpp"
  56 #include "gc/shared/gcHeapSummary.hpp"
  57 #include "gc/shared/gcId.hpp"
  58 #include "gc/shared/gcLocker.inline.hpp"
  59 #include "gc/shared/gcTimer.hpp"
  60 #include "gc/shared/gcTrace.hpp"
  61 #include "gc/shared/gcTraceTime.hpp"
  62 #include "gc/shared/generationSpec.hpp"
  63 #include "gc/shared/isGCActiveMark.hpp"
  64 #include "gc/shared/referenceProcessor.hpp"
  65 #include "gc/shared/taskqueue.inline.hpp"

  66 #include "memory/allocation.hpp"
  67 #include "memory/iterator.hpp"
  68 #include "oops/oop.inline.hpp"
  69 #include "runtime/atomic.inline.hpp"
  70 #include "runtime/init.hpp"
  71 #include "runtime/orderAccess.inline.hpp"
  72 #include "runtime/vmThread.hpp"
  73 #include "utilities/globalDefinitions.hpp"
  74 #include "utilities/stack.inline.hpp"
  75 
  76 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  77 
  78 // INVARIANTS/NOTES
  79 //
  80 // All allocation activity covered by the G1CollectedHeap interface is
  81 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  82 // and allocate_new_tlab, which are the "entry" points to the
  83 // allocation code from the rest of the JVM.  (Note that this does not
  84 // apply to TLAB allocation, which is not part of this interface: it
  85 // is done by clients of this interface.)


 206   } while (hr != head);
 207   assert(hr != NULL, "invariant");
 208   hr->set_next_dirty_cards_region(NULL);
 209   return hr;
 210 }
 211 
 212 // Returns true if the reference points to an object that
 213 // can move in an incremental collection.
 214 bool G1CollectedHeap::is_scavengable(const void* p) {
 215   HeapRegion* hr = heap_region_containing(p);
 216   return !hr->is_pinned();
 217 }
 218 
 219 // Private methods.
 220 
 221 HeapRegion*
 222 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 223   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 224   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 225     if (!_secondary_free_list.is_empty()) {
 226       if (G1ConcRegionFreeingVerbose) {
 227         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 228                                "secondary_free_list has %u entries",
 229                                _secondary_free_list.length());
 230       }
 231       // It looks as if there are free regions available on the
 232       // secondary_free_list. Let's move them to the free_list and try
 233       // again to allocate from it.
 234       append_secondary_free_list();
 235 
 236       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 237              "empty we should have moved at least one entry to the free_list");
 238       HeapRegion* res = _hrm.allocate_free_region(is_old);
 239       if (G1ConcRegionFreeingVerbose) {
 240         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 241                                "allocated " HR_FORMAT " from secondary_free_list",
 242                                HR_FORMAT_PARAMS(res));
 243       }
 244       return res;
 245     }
 246 
 247     // Wait here until we get notified either when (a) there are no
 248     // more free regions coming or (b) some regions have been moved onto
 249     // the secondary_free_list.
 250     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 251   }
 252 
 253   if (G1ConcRegionFreeingVerbose) {
 254     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 255                            "could not allocate from secondary_free_list");
 256   }
 257   return NULL;
 258 }
 259 
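Aside: the loop in new_region_try_secondary_free_list() above is the classic "wait until either work arrives or the producer is done" shape. A minimal standalone sketch of that shape, using std::condition_variable in place of HotSpot's SecondaryFreeList_lock monitor; RegionSource, free_list and regions_coming are illustrative names, not the real HotSpot types:

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct RegionSource {
      std::mutex lock;
      std::condition_variable cv;       // signalled when regions arrive or the producer finishes
      std::deque<int> free_list;        // stands in for _secondary_free_list
      bool regions_coming = true;       // stands in for free_regions_coming()

      // Take a region if one is (or becomes) available; return -1 once the
      // producer is done and the list is empty, mirroring the NULL return above.
      int take_or_give_up() {
        std::unique_lock<std::mutex> x(lock);
        while (!free_list.empty() || regions_coming) {
          if (!free_list.empty()) {
            int region = free_list.front();    // analogous to allocate_free_region()
            free_list.pop_front();
            return region;
          }
          cv.wait(x);                          // analogous to SecondaryFreeList_lock->wait()
        }
        return -1;                             // "could not allocate from secondary_free_list"
      }
    };
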
 260 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 261   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 262          "the only time we use this to allocate a humongous region is "
 263          "when we are allocating a single humongous region");
 264 
 265   HeapRegion* res;
 266   if (G1StressConcRegionFreeing) {
 267     if (!_secondary_free_list.is_empty()) {
 268       if (G1ConcRegionFreeingVerbose) {
 269         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 270                                "forced to look at the secondary_free_list");
 271       }
 272       res = new_region_try_secondary_free_list(is_old);
 273       if (res != NULL) {
 274         return res;
 275       }
 276     }
 277   }
 278 
 279   res = _hrm.allocate_free_region(is_old);
 280 
 281   if (res == NULL) {
 282     if (G1ConcRegionFreeingVerbose) {
 283       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 284                              "res == NULL, trying the secondary_free_list");
 285     }
 286     res = new_region_try_secondary_free_list(is_old);
 287   }
 288   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 289     // Currently, only attempts to allocate GC alloc regions set
 290     // do_expand to true. So, we should only reach here during a
 291     // safepoint. If this assumption changes we might have to
 292     // reconsider the use of _expand_heap_after_alloc_failure.
 293     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 294 
 295     ergo_verbose1(ErgoHeapSizing,
 296                   "attempt heap expansion",
 297                   ergo_format_reason("region allocation request failed")
 298                   ergo_format_byte("allocation request"),
 299                   word_size * HeapWordSize);

 300     if (expand(word_size * HeapWordSize)) {
 301       // Given that expand() succeeded in expanding the heap, and we
 302       // always expand the heap by an amount aligned to the heap
 303       // region size, the free list should in theory not be empty.
 304       // In either case allocate_free_region() will check for NULL.
 305       res = _hrm.allocate_free_region(is_old);
 306     } else {
 307       _expand_heap_after_alloc_failure = false;
 308     }
 309   }
 310   return res;
 311 }
 312 
 313 HeapWord*
 314 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 315                                                            uint num_regions,
 316                                                            size_t word_size,
 317                                                            AllocationContext_t context) {
 318   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 319   assert(is_humongous(word_size), "word_size should be humongous");


 467     // potentially waits for regions from the secondary free list.
 468     wait_while_free_regions_coming();
 469     append_secondary_free_list_if_not_empty_with_lock();
 470 
 471     // Policy: Try only empty regions (i.e. already committed) first. Maybe we
 472     // are lucky enough to find some.
 473     first = _hrm.find_contiguous_only_empty(obj_regions);
 474     if (first != G1_NO_HRM_INDEX) {
 475       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 476     }
 477   }
 478 
 479   if (first == G1_NO_HRM_INDEX) {
 480     // Policy: We could not find enough regions for the humongous object in the
 481     // free list. Look through the heap to find a mix of free and uncommitted regions.
 482     // If we find such a mix, try expansion.
 483     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 484     if (first != G1_NO_HRM_INDEX) {
 485       // We found something. Make sure these regions are committed, i.e. expand
 486       // the heap. Alternatively we could do a defragmentation GC.
 487       ergo_verbose1(ErgoHeapSizing,
 488                     "attempt heap expansion",
 489                     ergo_format_reason("humongous allocation request failed")
 490                     ergo_format_byte("allocation request"),
 491                     word_size * HeapWordSize);
 492 

 493       _hrm.expand_at(first, obj_regions);
 494       g1_policy()->record_new_heap_size(num_regions());
 495 
 496 #ifdef ASSERT
 497       for (uint i = first; i < first + obj_regions; ++i) {
 498         HeapRegion* hr = region_at(i);
 499         assert(hr->is_free(), "sanity");
 500         assert(hr->is_empty(), "sanity");
 501         assert(is_on_master_free_list(hr), "sanity");
 502       }
 503 #endif
 504       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 505     } else {
 506       // Policy: Potentially trigger a defragmentation GC.
 507     }
 508   }
 509 
 510   HeapWord* result = NULL;
 511   if (first != G1_NO_HRM_INDEX) {
 512     result = humongous_obj_allocate_initialize_regions(first, obj_regions,


 790     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 791     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 792       start_address = start_region->end();
 793       if (start_address > last_address) {
 794         increase_used(word_size * HeapWordSize);
 795         start_region->set_top(last_address + 1);
 796         continue;
 797       }
 798       start_region->set_top(start_address);
 799       curr_range = MemRegion(start_address, last_address + 1);
 800       start_region = _hrm.addr_to_region(start_address);
 801     }
 802 
 803     // Perform the actual region allocation, exiting if it fails.
 804     // Then note how much new space we have allocated.
 805     if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
 806       return false;
 807     }
 808     increase_used(word_size * HeapWordSize);
 809     if (commits != 0) {
 810       ergo_verbose1(ErgoHeapSizing,
 811                     "attempt heap expansion",
 812                     ergo_format_reason("allocate archive regions")
 813                     ergo_format_byte("total size"),
 814                     HeapRegion::GrainWords * HeapWordSize * commits);

 815     }
 816 
 817     // Mark each G1 region touched by the range as archive, add it to the old set,
 818     // and set the allocation context and top.
 819     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 820     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 821     prev_last_region = last_region;
 822 
 823     while (curr_region != NULL) {
 824       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 825              "Region already in use (index %u)", curr_region->hrm_index());
 826       _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
 827       curr_region->set_allocation_context(AllocationContext::system());
 828       curr_region->set_archive();
 829       _old_set.add(curr_region);
 830       if (curr_region != last_region) {
 831         curr_region->set_top(curr_region->end());
 832         curr_region = _hrm.next_region_in_heap(curr_region);
 833       } else {
 834         curr_region->set_top(last_address + 1);


 975       guarantee(curr_region->is_archive(),
 976                 "Expected archive region at index %u", curr_region->hrm_index());
 977       uint curr_index = curr_region->hrm_index();
 978       _old_set.remove(curr_region);
 979       curr_region->set_free();
 980       curr_region->set_top(curr_region->bottom());
 981       if (curr_region != last_region) {
 982         curr_region = _hrm.next_region_in_heap(curr_region);
 983       } else {
 984         curr_region = NULL;
 985       }
 986       _hrm.shrink_at(curr_index, 1);
 987       uncommitted_regions++;
 988     }
 989 
 990     // Notify mark-sweep that this is no longer an archive range.
 991     G1MarkSweep::set_range_archive(ranges[i], false);
 992   }
 993 
 994   if (uncommitted_regions != 0) {
 995     ergo_verbose1(ErgoHeapSizing,
 996                   "attempt heap shrinking",
 997                   ergo_format_reason("uncommitted archive regions")
 998                   ergo_format_byte("total size"),
 999                   HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
1000   }
1001   decrease_used(size_used);
1002 }
1003 
1004 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1005                                                         uint* gc_count_before_ret,
1006                                                         uint* gclocker_retry_count_ret) {
1007   // The structure of this method has a lot of similarities to
1008   // attempt_allocation_slow(). The reason these two were not merged
1009   // into a single one is that such a method would require several "if
1010   // allocation is not humongous do this, otherwise do that"
1011   // conditional paths which would obscure its flow. In fact, an early
1012   // version of this code did use a unified method which was harder to
1013   // follow and, as a result, it had subtle bugs that were hard to
1014   // track down. So keeping these two methods separate allows each to
1015   // be more readable. It will be good to keep these two in sync as
1016   // much as possible.
1017 
1018   assert_heap_not_locked_and_not_at_safepoint();


1218       // We only generate output for non-empty regions.
1219     } else if (hr->is_starts_humongous()) {
1220       _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1221     } else if (hr->is_continues_humongous()) {
1222       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1223     } else if (hr->is_archive()) {
1224       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1225     } else if (hr->is_old()) {
1226       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1227     } else {
1228       ShouldNotReachHere();
1229     }
1230     return false;
1231   }
1232 
1233   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1234     : _hr_printer(hr_printer) { }
1235 };
1236 
1237 void G1CollectedHeap::print_hrm_post_compaction() {

1238   PostCompactionPrinterClosure cl(hr_printer());
1239   heap_region_iterate(&cl);


1240 }
1241 
1242 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1243                                          bool clear_all_soft_refs) {
1244   assert_at_safepoint(true /* should_be_vm_thread */);
1245 
1246   if (GC_locker::check_active_before_gc()) {
1247     return false;
1248   }
1249 
1250   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1251   gc_timer->register_gc_start();
1252 
1253   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1254   GCIdMark gc_id_mark;
1255   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1256 
1257   SvcGCMarker sgcm(SvcGCMarker::FULL);
1258   ResourceMark rm;
1259 
1260   G1Log::update_level();
1261   print_heap_before_gc();
1262   trace_heap_before_gc(gc_tracer);
1263 
1264   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1265 
1266   verify_region_sets_optional();
1267 
1268   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1269                            collector_policy()->should_clear_all_soft_refs();
1270 
1271   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1272 
1273   {
1274     IsGCActiveMark x;
1275 
1276     // Timing
1277     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1278     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1279 
1280     {
1281       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1282       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1283       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1284 
1285       g1_policy()->record_full_collection_start();
1286 
1287       // Note: When we have a more flexible GC logging framework that
1288       // allows us to add optional attributes to a GC log record we
1289       // could consider timing and reporting how long we wait in the
1290       // following two methods.
1291       wait_while_free_regions_coming();
1292       // If we start the compaction before the CM threads finish
1293       // scanning the root regions we might trip them over as we'll
1294       // be moving objects / updating references. So let's wait until
1295       // they are done. By telling them to abort, they should complete
1296       // early.
1297       _cm->root_regions()->abort();
1298       _cm->root_regions()->wait_until_scan_finished();
1299       append_secondary_free_list_if_not_empty_with_lock();
1300 
1301       gc_prologue(true);


1312 #if defined(COMPILER2) || INCLUDE_JVMCI
1313       DerivedPointerTable::clear();
1314 #endif
1315 
1316       // Disable discovery and empty the discovered lists
1317       // for the CM ref processor.
1318       ref_processor_cm()->disable_discovery();
1319       ref_processor_cm()->abandon_partial_discovery();
1320       ref_processor_cm()->verify_no_references_recorded();
1321 
1322       // Abandon current iterations of concurrent marking and concurrent
1323       // refinement, if any are in progress. We have to do this before
1324       // wait_until_scan_finished() below.
1325       concurrent_mark()->abort();
1326 
1327       // Make sure we'll choose a new allocation region afterwards.
1328       _allocator->release_mutator_alloc_region();
1329       _allocator->abandon_gc_alloc_regions();
1330       g1_rem_set()->cleanupHRRS();
1331 
1332       // We should call this after we retire any currently active alloc
1333       // regions so that all the ALLOC / RETIRE events are generated
1334       // before the start GC event.
1335       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1336 
1337       // We may have added regions to the current incremental collection
1338       // set between the last GC or pause and now. We need to clear the
1339       // incremental collection set and then start rebuilding it afresh
1340       // after this full GC.
1341       abandon_collection_set(g1_policy()->inc_cset_head());
1342       g1_policy()->clear_incremental_cset();
1343       g1_policy()->stop_incremental_cset_building();
1344 
1345       tear_down_region_sets(false /* free_list_only */);
1346       collector_state()->set_gcs_are_young(true);
1347 
1348       // See the comments in g1CollectedHeap.hpp and
1349       // G1CollectedHeap::ref_processing_init() about
1350       // how reference processing currently works in G1.
1351 
1352       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1353       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1354 
1355       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1356       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);


1383       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1384       ClassLoaderDataGraph::purge();
1385       MetaspaceAux::verify_metrics();
1386 
1387       // Note: since we've just done a full GC, concurrent
1388       // marking is no longer active. Therefore we need not
1389       // re-enable reference discovery for the CM ref processor.
1390       // That will be done at the start of the next marking cycle.
1391       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1392       ref_processor_cm()->verify_no_references_recorded();
1393 
1394       reset_gc_time_stamp();
1395       // Since everything potentially moved, we will clear all remembered
1396       // sets, and clear all cards.  Later we will rebuild remembered
1397       // sets. We will also reset the GC time stamps of the regions.
1398       clear_rsets_post_compaction();
1399       check_gc_time_stamps();
1400 
1401       resize_if_necessary_after_full_collection();
1402 
1403       if (_hr_printer.is_active()) {
1404         // We should do this after we potentially resize the heap so
1405         // that all the COMMIT / UNCOMMIT events are generated before
1406         // the end GC event.
1407 
1408         print_hrm_post_compaction();
1409         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1410       }
1411 
1412       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1413       if (hot_card_cache->use_cache()) {
1414         hot_card_cache->reset_card_counts();
1415         hot_card_cache->reset_hot_cache();
1416       }
1417 
1418       // Rebuild remembered sets of all regions.
1419       uint n_workers =
1420         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1421                                                 workers()->active_workers(),
1422                                                 Threads::number_of_non_daemon_threads());
1423       workers()->set_active_workers(n_workers);
1424 
1425       ParRebuildRSTask rebuild_rs_task(this);
1426       workers()->run_task(&rebuild_rs_task);
1427 
1428       // Rebuild the strong code root lists for each region
1429       rebuild_strong_code_roots();
1430 


1459       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1460       // objects marked during a full GC against the previous bitmap.
1461       // But we need to clear it before calling check_bitmaps below since
1462       // the full GC has compacted objects and updated TAMS but not updated
1463       // the prev bitmap.
1464       if (G1VerifyBitmaps) {
1465         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1466       }
1467       check_bitmaps("Full GC End");
1468 
1469       // Start a new incremental collection set for the next pause
1470       assert(g1_policy()->collection_set() == NULL, "must be");
1471       g1_policy()->start_incremental_cset_building();
1472 
1473       clear_cset_fast_test();
1474 
1475       _allocator->init_mutator_alloc_region();
1476 
1477       g1_policy()->record_full_collection_end();
1478 
1479       if (G1Log::fine()) {
1480         g1_policy()->print_heap_transition();
1481       }
1482 
1483       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1484       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1485       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1486       // before any GC notifications are raised.
1487       g1mm()->update_sizes();
1488 
1489       gc_epilogue(true);
1490     }
1491 
1492     if (G1Log::finer()) {
1493       g1_policy()->print_detailed_heap_transition(true /* full */);
1494     }
1495 
1496     print_heap_after_gc();
1497     trace_heap_after_gc(gc_tracer);
1498 
1499     post_full_gc_dump(gc_timer);
1500 
1501     gc_timer->register_gc_end();
1502     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1503   }
1504 
1505   return true;
1506 }
1507 
1508 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1509   // Currently, there is no facility in the do_full_collection(bool) API to notify
1510   // the caller that the collection did not succeed (e.g., because it was locked
1511   // out by the GC locker). So, right now, we'll ignore the return value.
1512   bool dummy = do_full_collection(true,                /* explicit_gc */
1513                                   clear_all_soft_refs);
1514 }


1552 
1553   // This assert only makes sense here, before we adjust them
1554   // with respect to the min and max heap size.
1555   assert(minimum_desired_capacity <= maximum_desired_capacity,
1556          "minimum_desired_capacity = " SIZE_FORMAT ", "
1557          "maximum_desired_capacity = " SIZE_FORMAT,
1558          minimum_desired_capacity, maximum_desired_capacity);
1559 
1560   // Should not be greater than the heap max size. No need to adjust
1561   // it with respect to the heap min size as it's a lower bound (i.e.,
1562   // we'll try to make the capacity larger than it, not smaller).
1563   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1564   // Should not be less than the heap min size. No need to adjust it
1565   // with respect to the heap max size as it's an upper bound (i.e.,
1566   // we'll try to make the capacity smaller than it, not greater).
1567   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1568 
1569   if (capacity_after_gc < minimum_desired_capacity) {
1570     // Don't expand unless it's significant
1571     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1572     ergo_verbose4(ErgoHeapSizing,
1573                   "attempt heap expansion",
1574                   ergo_format_reason("capacity lower than "
1575                                      "min desired capacity after Full GC")
1576                   ergo_format_byte("capacity")
1577                   ergo_format_byte("occupancy")
1578                   ergo_format_byte_perc("min desired capacity"),
1579                   capacity_after_gc, used_after_gc,
1580                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1581     expand(expand_bytes);
1582 
1583     // No expansion, now see if we want to shrink
1584   } else if (capacity_after_gc > maximum_desired_capacity) {
1585     // Capacity too large, compute shrinking size
1586     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1587     ergo_verbose4(ErgoHeapSizing,
1588                   "attempt heap shrinking",
1589                   ergo_format_reason("capacity higher than "
1590                                      "max desired capacity after Full GC")
1591                   ergo_format_byte("capacity")
1592                   ergo_format_byte("occupancy")
1593                   ergo_format_byte_perc("max desired capacity"),
1594                   capacity_after_gc, used_after_gc,
1595                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1596     shrink(shrink_bytes);
1597   }
1598 }
1599 
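Aside: the resize decision above boils down to a clamp followed by a three-way compare. A compact standalone restatement of just that decision (the ergo logging and the derivation of the desired capacities from MinHeapFreeRatio/MaxHeapFreeRatio are omitted; resize_delta is an illustrative name, not a HotSpot function):

    #include <algorithm>
    #include <cstddef>

    // Positive result: bytes to expand by; negative: bytes to shrink by; zero: leave the heap alone.
    long long resize_delta(size_t capacity_after_gc,
                           size_t minimum_desired_capacity,
                           size_t maximum_desired_capacity,
                           size_t min_heap_size,
                           size_t max_heap_size) {
      // Bound the desired capacities by the fixed heap limits, as in the code above.
      minimum_desired_capacity = std::min(minimum_desired_capacity, max_heap_size);
      maximum_desired_capacity = std::max(maximum_desired_capacity, min_heap_size);

      if (capacity_after_gc < minimum_desired_capacity) {
        return (long long)(minimum_desired_capacity - capacity_after_gc);   // expand(...)
      } else if (capacity_after_gc > maximum_desired_capacity) {
        return -(long long)(capacity_after_gc - maximum_desired_capacity);  // shrink(...)
      }
      return 0;
    }
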
1600 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1601                                                             AllocationContext_t context,
1602                                                             bool do_gc,
1603                                                             bool clear_all_soft_refs,
1604                                                             bool expect_null_mutator_alloc_region,
1605                                                             bool* gc_succeeded) {
1606   *gc_succeeded = true;
1607   // Let's attempt the allocation first.
1608   HeapWord* result =
1609     attempt_allocation_at_safepoint(word_size,
1610                                     context,
1611                                     expect_null_mutator_alloc_region);
1612   if (result != NULL) {
1613     assert(*gc_succeeded, "sanity");
1614     return result;
1615   }


1681 
1682   // What else?  We might try synchronous finalization later.  If the total
1683   // space available is large enough for the allocation, then a more
1684   // complete compaction phase than we've tried so far might be
1685   // appropriate.
1686   assert(*succeeded, "sanity");
1687   return NULL;
1688 }
1689 
 1690 // Attempts to expand the heap sufficiently
 1691 // to support an allocation of the given "word_size".  If
 1692 // successful, performs the allocation and returns the address of the
 1693 // allocated block, or else "NULL".
1694 
1695 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1696   assert_at_safepoint(true /* should_be_vm_thread */);
1697 
1698   verify_region_sets_optional();
1699 
1700   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1701   ergo_verbose1(ErgoHeapSizing,
1702                 "attempt heap expansion",
1703                 ergo_format_reason("allocation request failed")
1704                 ergo_format_byte("allocation request"),
1705                 word_size * HeapWordSize);


1706   if (expand(expand_bytes)) {
1707     _hrm.verify_optional();
1708     verify_region_sets_optional();
1709     return attempt_allocation_at_safepoint(word_size,
1710                                            context,
1711                                            false /* expect_null_mutator_alloc_region */);
1712   }
1713   return NULL;
1714 }
1715 
1716 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1717   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1718   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1719                                        HeapRegion::GrainBytes);
1720   ergo_verbose2(ErgoHeapSizing,
1721                 "expand the heap",
1722                 ergo_format_byte("requested expansion amount")
1723                 ergo_format_byte("attempted expansion amount"),
1724                 expand_bytes, aligned_expand_bytes);
1725 
1726   if (is_maximal_no_gc()) {
1727     ergo_verbose0(ErgoHeapSizing,
1728                       "did not expand the heap",
1729                       ergo_format_reason("heap already fully expanded"));
1730     return false;
1731   }
1732 
1733   double expand_heap_start_time_sec = os::elapsedTime();
1734   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1735   assert(regions_to_expand > 0, "Must expand by at least one region");
1736 
1737   uint expanded_by = _hrm.expand_by(regions_to_expand);
1738   if (expand_time_ms != NULL) {
1739     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1740   }
1741 
1742   if (expanded_by > 0) {
1743     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1744     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1745     g1_policy()->record_new_heap_size(num_regions());
1746   } else {
1747     ergo_verbose0(ErgoHeapSizing,
1748                   "did not expand the heap",
1749                   ergo_format_reason("heap expansion operation failed"));
1750     // The expansion of the virtual storage space was unsuccessful.
1751     // Let's see if it was because we ran out of swap.
1752     if (G1ExitOnExpansionFailure &&
1753         _hrm.available() >= regions_to_expand) {
1754       // We had head room...
1755       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1756     }
1757   }
1758   return regions_to_expand > 0;
1759 }
1760 
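Aside: the two alignment calls in expand() just round the request up to a whole number of pages and then to a whole number of regions before dividing by the region size. A minimal sketch of that arithmetic, assuming power-of-two page and region sizes; align_up here is a stand-in for page_align_size_up()/align_size_up(), not the HotSpot functions themselves:

    #include <cassert>
    #include <cstddef>

    // Round value up to the next multiple of alignment (alignment must be a power of two).
    static size_t align_up(size_t value, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // E.g. with 4 KB pages and 1 MB regions, a 100 KB request becomes exactly one region.
    static unsigned regions_for_expansion(size_t expand_bytes, size_t page_size, size_t region_size) {
      size_t aligned = align_up(expand_bytes, page_size);   // page_align_size_up(expand_bytes)
      aligned = align_up(aligned, region_size);             // align_size_up(..., HeapRegion::GrainBytes)
      return (unsigned)(aligned / region_size);             // regions_to_expand
    }
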
1761 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1762   size_t aligned_shrink_bytes =
1763     ReservedSpace::page_align_size_down(shrink_bytes);
1764   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1765                                          HeapRegion::GrainBytes);
1766   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1767 
1768   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1769   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1770 
1771   ergo_verbose3(ErgoHeapSizing,
1772                 "shrink the heap",
1773                 ergo_format_byte("requested shrinking amount")
1774                 ergo_format_byte("aligned shrinking amount")
1775                 ergo_format_byte("attempted shrinking amount"),
1776                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1777   if (num_regions_removed > 0) {
1778     g1_policy()->record_new_heap_size(num_regions());
1779   } else {
1780     ergo_verbose0(ErgoHeapSizing,
1781                   "did not shrink the heap",
1782                   ergo_format_reason("heap shrinking operation failed"));
1783   }
1784 }
1785 
1786 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1787   verify_region_sets_optional();
1788 
1789   // We should only reach here at the end of a Full GC which means we
 1790   // should not be holding on to any GC alloc regions. The method
1791   // below will make sure of that and do any remaining clean up.
1792   _allocator->abandon_gc_alloc_regions();
1793 
1794   // Instead of tearing down / rebuilding the free lists here, we
 1795   // could use the remove_all_pending() method on free_list to
 1796   // remove only the regions that actually need to be removed.
1797   tear_down_region_sets(true /* free_list_only */);
1798   shrink_helper(shrink_bytes);
1799   rebuild_region_sets(true /* free_list_only */);
1800 
1801   _hrm.verify_optional();
1802   verify_region_sets_optional();


1874   // Initialize the G1EvacuationFailureALot counters and flags.
1875   NOT_PRODUCT(reset_evacuation_should_fail();)
1876 
1877   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1878 }
1879 
1880 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1881                                                                  size_t size,
1882                                                                  size_t translation_factor) {
1883   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1884   // Allocate a new reserved space, preferring to use large pages.
1885   ReservedSpace rs(size, preferred_page_size);
1886   G1RegionToSpaceMapper* result  =
1887     G1RegionToSpaceMapper::create_mapper(rs,
1888                                          size,
1889                                          rs.alignment(),
1890                                          HeapRegion::GrainBytes,
1891                                          translation_factor,
1892                                          mtGC);
1893   if (TracePageSizes) {
1894     gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1895                            description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1896   }
1897   return result;
1898 }
1899 
1900 jint G1CollectedHeap::initialize() {
1901   CollectedHeap::pre_initialize();
1902   os::enable_vtime();
1903 
1904   G1Log::init();
1905 
1906   // Necessary to satisfy locking discipline assertions.
1907 
1908   MutexLocker x(Heap_lock);
1909 
1910   // We have to initialize the printer before committing the heap, as
1911   // it will be used then.
1912   _hr_printer.set_active(G1PrintHeapRegions);
1913 
1914   // While there are no constraints in the GC code that HeapWordSize
1915   // be any particular value, there are multiple other areas in the
 1916   // system which assume that HeapWordSize equals wordSize (e.g.
 1917   // oop->object_size in some cases incorrectly returns the size in
 1918   // wordSize units rather than HeapWordSize).
1919   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1920 
1921   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1922   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1923   size_t heap_alignment = collector_policy()->heap_alignment();
1924 
1925   // Ensure that the sizes are properly aligned.
1926   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1927   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1928   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1929 
1930   _refine_cte_cl = new RefineCardTableEntryClosure();
1931 
1932   jint ecode = JNI_OK;
1933   _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);


2086   G1AllocRegion::setup(this, dummy_region);
2087 
2088   _allocator->init_mutator_alloc_region();
2089 
 2090   // Create the monitoring and management support here, once the
 2091   // values in the heap have been properly initialized.
2092   _g1mm = new G1MonitoringSupport(this);
2093 
2094   G1StringDedup::initialize();
2095 
2096   _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2097   for (uint i = 0; i < ParallelGCThreads; i++) {
2098     new (&_preserved_objs[i]) OopAndMarkOopStack();
2099   }
2100 
2101   return JNI_OK;
2102 }
2103 
2104 void G1CollectedHeap::stop() {
2105   // Stop all concurrent threads. We do this to make sure these threads
2106   // do not continue to execute and access resources (e.g. gclog_or_tty)
2107   // that are destroyed during shutdown.
2108   _cg1r->stop();
2109   _cmThread->stop();
2110   if (G1StringDedup::is_enabled()) {
2111     G1StringDedup::stop();
2112   }
2113 }
2114 
2115 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2116   return HeapRegion::max_region_size();
2117 }
2118 
2119 void G1CollectedHeap::post_initialize() {
2120   CollectedHeap::post_initialize();
2121   ref_processing_init();
2122 }
2123 
2124 void G1CollectedHeap::ref_processing_init() {
2125   // Reference processing in G1 currently works as follows:
2126   //


2203 }
2204 
2205 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2206   hr->reset_gc_time_stamp();
2207 }
2208 
2209 #ifndef PRODUCT
2210 
2211 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2212 private:
2213   unsigned _gc_time_stamp;
2214   bool _failures;
2215 
2216 public:
2217   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2218     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2219 
2220   virtual bool doHeapRegion(HeapRegion* hr) {
2221     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2222     if (_gc_time_stamp != region_gc_time_stamp) {
2223       gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2224                              "expected %d", HR_FORMAT_PARAMS(hr),
2225                              region_gc_time_stamp, _gc_time_stamp);
2226       _failures = true;
2227     }
2228     return false;
2229   }
2230 
2231   bool failures() { return _failures; }
2232 };
2233 
2234 void G1CollectedHeap::check_gc_time_stamps() {
2235   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2236   heap_region_iterate(&cl);
2237   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2238 }
2239 #endif // PRODUCT
2240 
2241 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2242   _cg1r->hot_card_cache()->drain(cl, worker_i);
2243 }
2244 


2792 private:
2793   G1CollectedHeap* _g1h;
2794   VerifyOption     _vo;
2795   bool             _failures;
2796 public:
2797   // _vo == UsePrevMarking -> use "prev" marking information,
2798   // _vo == UseNextMarking -> use "next" marking information,
2799   // _vo == UseMarkWord    -> use mark word from object header.
2800   VerifyRootsClosure(VerifyOption vo) :
2801     _g1h(G1CollectedHeap::heap()),
2802     _vo(vo),
2803     _failures(false) { }
2804 
2805   bool failures() { return _failures; }
2806 
2807   template <class T> void do_oop_nv(T* p) {
2808     T heap_oop = oopDesc::load_heap_oop(p);
2809     if (!oopDesc::is_null(heap_oop)) {
2810       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2811       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2812         gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
2813                                "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2814         if (_vo == VerifyOption_G1UseMarkWord) {
2815           gclog_or_tty->print_cr("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2816         }
2817         obj->print_on(gclog_or_tty);

2818         _failures = true;
2819       }
2820     }
2821   }
2822 
2823   void do_oop(oop* p)       { do_oop_nv(p); }
2824   void do_oop(narrowOop* p) { do_oop_nv(p); }
2825 };
2826 
2827 class G1VerifyCodeRootOopClosure: public OopClosure {
2828   G1CollectedHeap* _g1h;
2829   OopClosure* _root_cl;
2830   nmethod* _nm;
2831   VerifyOption _vo;
2832   bool _failures;
2833 
2834   template <class T> void do_oop_work(T* p) {
2835     // First verify that this root is live
2836     _root_cl->do_oop(p);
2837 


2842 
2843     // Don't check the code roots during marking verification in a full GC
2844     if (_vo == VerifyOption_G1UseMarkWord) {
2845       return;
2846     }
2847 
2848     // Now verify that the current nmethod (which contains p) is
2849     // in the code root list of the heap region containing the
2850     // object referenced by p.
2851 
2852     T heap_oop = oopDesc::load_heap_oop(p);
2853     if (!oopDesc::is_null(heap_oop)) {
2854       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2855 
2856       // Now fetch the region containing the object
2857       HeapRegion* hr = _g1h->heap_region_containing(obj);
2858       HeapRegionRemSet* hrrs = hr->rem_set();
2859       // Verify that the strong code root list for this region
2860       // contains the nmethod
2861       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2862         gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
2863                                "from nmethod " PTR_FORMAT " not in strong "
2864                                "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2865                                p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2866         _failures = true;
2867       }
2868     }
2869   }
2870 
2871 public:
2872   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2873     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2874 
2875   void do_oop(oop* p) { do_oop_work(p); }
2876   void do_oop(narrowOop* p) { do_oop_work(p); }
2877 
2878   void set_nmethod(nmethod* nm) { _nm = nm; }
2879   bool failures() { return _failures; }
2880 };
2881 
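Aside: both VerifyRootsClosure and G1VerifyCodeRootOopClosure route the oop* and narrowOop* overloads of do_oop through one templated body (do_oop_nv / do_oop_work), so the verification logic is written once for both compressed and uncompressed references. A minimal standalone sketch of that dispatch idiom; WideRef and NarrowRef are illustrative stand-ins for oop and narrowOop, not the HotSpot types:

    #include <cstddef>
    #include <cstdint>

    using WideRef   = void*;      // stand-in for oop
    using NarrowRef = uint32_t;   // stand-in for narrowOop

    class RefClosure {
    public:
      virtual ~RefClosure() {}
      virtual void do_ref(WideRef* p) = 0;
      virtual void do_ref(NarrowRef* p) = 0;
    };

    class CountingClosure : public RefClosure {
      size_t _count = 0;
      // One templated body handles both encodings, as do_oop_work<T> does above.
      template <class T> void do_ref_work(T* p) {
        if (p != nullptr) { ++_count; }
      }
    public:
      void do_ref(WideRef* p) override   { do_ref_work(p); }
      void do_ref(NarrowRef* p) override { do_ref_work(p); }
      size_t count() const { return _count; }
    };
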
2882 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {


3023   }
3024 
3025   bool doHeapRegion(HeapRegion* r) {
3026     // For archive regions, verify there are no heap pointers to
3027     // non-pinned regions. For all others, verify liveness info.
3028     if (r->is_archive()) {
3029       VerifyArchiveRegionClosure verify_oop_pointers(r);
3030       r->object_iterate(&verify_oop_pointers);
3031       return true;
3032     }
3033     if (!r->is_continues_humongous()) {
3034       bool failures = false;
3035       r->verify(_vo, &failures);
3036       if (failures) {
3037         _failures = true;
3038       } else if (!r->is_starts_humongous()) {
3039         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3040         r->object_iterate(&not_dead_yet_cl);
3041         if (_vo != VerifyOption_G1UseNextMarking) {
3042           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3043             gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3044                                    "max_live_bytes " SIZE_FORMAT " "
3045                                    "< calculated " SIZE_FORMAT,
3046                                    p2i(r->bottom()), p2i(r->end()),
3047                                    r->max_live_bytes(),
3048                                  not_dead_yet_cl.live_bytes());
3049             _failures = true;
3050           }
3051         } else {
3052           // When vo == UseNextMarking we cannot currently do a sanity
3053           // check on the live bytes as the calculation has not been
3054           // finalized yet.
3055         }
3056       }
3057     }
 3058     return false; // continue the iteration; any failure has been recorded in _failures
3059   }
3060 };
3061 
3062 // This is the task used for parallel verification of the heap regions
3063 
3064 class G1ParVerifyTask: public AbstractGangTask {
3065 private:
3066   G1CollectedHeap*  _g1h;
3067   VerifyOption      _vo;
3068   bool              _failures;


3076       AbstractGangTask("Parallel verify task"),
3077       _g1h(g1h),
3078       _vo(vo),
3079       _failures(false),
3080       _hrclaimer(g1h->workers()->active_workers()) {}
3081 
3082   bool failures() {
3083     return _failures;
3084   }
3085 
3086   void work(uint worker_id) {
3087     HandleMark hm;
3088     VerifyRegionClosure blk(true, _vo);
3089     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3090     if (blk.failures()) {
3091       _failures = true;
3092     }
3093   }
3094 };
3095 
3096 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3097   if (SafepointSynchronize::is_at_safepoint()) {

3098     assert(Thread::current()->is_VM_thread(),
3099            "Expected to be executed serially by the VM thread at this point");
3100 
3101     if (!silent) { gclog_or_tty->print("Roots "); }
3102     VerifyRootsClosure rootsCl(vo);
3103     VerifyKlassClosure klassCl(this, &rootsCl);
3104     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3105 
3106     // We apply the relevant closures to all the oops in the
3107     // system dictionary, class loader data graph, the string table
3108     // and the nmethods in the code cache.
3109     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3110     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3111 
3112     {
3113       G1RootProcessor root_processor(this, 1);
3114       root_processor.process_all_roots(&rootsCl,
3115                                        &cldCl,
3116                                        &blobsCl);
3117     }
3118 
3119     bool failures = rootsCl.failures() || codeRootsCl.failures();
3120 
3121     if (vo != VerifyOption_G1UseMarkWord) {
3122       // If we're verifying during a full GC then the region sets
3123       // will have been torn down at the start of the GC. Therefore
3124       // verifying the region sets will fail. So we only verify
3125       // the region sets when not in a full GC.
3126       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3127       verify_region_sets();
3128     }
3129 
3130     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3131     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3132 
3133       G1ParVerifyTask task(this, vo);
3134       workers()->run_task(&task);
3135       if (task.failures()) {
3136         failures = true;
3137       }
3138 
3139     } else {
3140       VerifyRegionClosure blk(false, vo);
3141       heap_region_iterate(&blk);
3142       if (blk.failures()) {
3143         failures = true;
3144       }
3145     }
3146 
3147     if (G1StringDedup::is_enabled()) {
3148       if (!silent) gclog_or_tty->print("StrDedup ");
3149       G1StringDedup::verify();
3150     }
3151 
3152     if (failures) {
3153       gclog_or_tty->print_cr("Heap:");
 3154       // Having the per-region information in the output helps us
 3155       // track down what went wrong. This is why we call
3156       // print_extended_on() instead of print_on().
3157       print_extended_on(gclog_or_tty);
3158       gclog_or_tty->cr();
3159       gclog_or_tty->flush();
3160     }
3161     guarantee(!failures, "there should not have been any failures");
3162   } else {
3163     if (!silent) {
3164       gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3165       if (G1StringDedup::is_enabled()) {
3166         gclog_or_tty->print(", StrDedup");
3167       }
3168       gclog_or_tty->print(") ");
3169     }
3170   }
3171 }
3172 
3173 void G1CollectedHeap::verify(bool silent) {
3174   verify(silent, VerifyOption_G1UsePrevMarking);
3175 }
3176 
3177 double G1CollectedHeap::verify(bool guard, const char* msg) {
3178   double verify_time_ms = 0.0;
3179 
3180   if (guard && total_collections() >= VerifyGCStartAt) {
3181     double verify_start = os::elapsedTime();
3182     HandleMark hm;  // Discard invalid handles created during verification
3183     prepare_for_verify();
3184     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3185     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3186   }
3187 
3188   return verify_time_ms;
3189 }
3190 
3191 void G1CollectedHeap::verify_before_gc() {
3192   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3193   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3194 }
3195 
3196 void G1CollectedHeap::verify_after_gc() {
3197   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3198   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3199 }
3200 
3201 class PrintRegionClosure: public HeapRegionClosure {
3202   outputStream* _st;
3203 public:
3204   PrintRegionClosure(outputStream* st) : _st(st) {}
3205   bool doHeapRegion(HeapRegion* r) {
3206     r->print_on(_st);
3207     return false;
3208   }
3209 };
3210 
3211 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3212                                        const HeapRegion* hr,
3213                                        const VerifyOption vo) const {
3214   switch (vo) {
3215   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3216   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3217   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();


3287     G1StringDedup::print_worker_threads_on(st);
3288   }
3289 }
3290 
3291 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3292   workers()->threads_do(tc);
3293   tc->do_thread(_cmThread);
3294   _cg1r->threads_do(tc);
3295   if (G1StringDedup::is_enabled()) {
3296     G1StringDedup::threads_do(tc);
3297   }
3298 }
3299 
3300 void G1CollectedHeap::print_tracing_info() const {
3301   // We'll overload this to mean "trace GC pause statistics."
3302   if (TraceYoungGenTime || TraceOldGenTime) {
3303     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3304     // to that.
3305     g1_policy()->print_tracing_info();
3306   }
3307   if (G1SummarizeRSetStats) {
3308     g1_rem_set()->print_summary_info();
3309   }
3310   if (G1SummarizeConcMark) {
3311     concurrent_mark()->print_summary_info();
3312   }
3313   g1_policy()->print_yg_surv_rate_info();
3314 }
3315 
3316 #ifndef PRODUCT
3317 // Helpful for debugging RSet issues.
3318 
3319 class PrintRSetsClosure : public HeapRegionClosure {
3320 private:
3321   const char* _msg;
3322   size_t _occupied_sum;
3323 
3324 public:
3325   bool doHeapRegion(HeapRegion* r) {
3326     HeapRegionRemSet* hrrs = r->rem_set();
3327     size_t occupied = hrrs->occupied();
3328     _occupied_sum += occupied;
3329 
3330     gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3331                            HR_FORMAT_PARAMS(r));
3332     if (occupied == 0) {
3333       gclog_or_tty->print_cr("  RSet is empty");
3334     } else {
3335       hrrs->print();
3336     }
3337     gclog_or_tty->print_cr("----------");
3338     return false;
3339   }
3340 
3341   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3342     gclog_or_tty->cr();
3343     gclog_or_tty->print_cr("========================================");
3344     gclog_or_tty->print_cr("%s", msg);
3345     gclog_or_tty->cr();
3346   }
3347 
3348   ~PrintRSetsClosure() {
3349     gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3350     gclog_or_tty->print_cr("========================================");
3351     gclog_or_tty->cr();
3352   }
3353 };
3354 
3355 void G1CollectedHeap::print_cset_rsets() {
3356   PrintRSetsClosure cl("Printing CSet RSets");
3357   collection_set_iterate(&cl);
3358 }
3359 
3360 void G1CollectedHeap::print_all_rsets() {
 3361   PrintRSetsClosure cl("Printing All RSets");
3362   heap_region_iterate(&cl);
3363 }
3364 #endif // PRODUCT
3365 
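Aside: PrintRSetsClosure follows the closure shape used throughout this file: a small object carrying per-visit state, a doHeapRegion() callback invoked once per region by heap_region_iterate()/collection_set_iterate(), and constructor/destructor producing the header and summary output. A stripped-down standalone sketch of that shape; Region, RegionClosure and iterate_regions are illustrative stand-ins, not the HotSpot types:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Region { unsigned index; size_t rset_occupied; };

    struct RegionClosure {
      virtual ~RegionClosure() {}
      // In this sketch, returning true stops the iteration early; false visits the next region.
      virtual bool do_region(Region* r) = 0;
    };

    static void iterate_regions(std::vector<Region>& heap, RegionClosure* cl) {
      for (Region& r : heap) {
        if (cl->do_region(&r)) break;
      }
    }

    struct PrintRSetsSketch : public RegionClosure {
      size_t occupied_sum = 0;
      explicit PrintRSetsSketch(const char* msg) { std::printf("==== %s ====\n", msg); }
      ~PrintRSetsSketch() { std::printf("Occupied Sum: %zu\n", occupied_sum); }
      bool do_region(Region* r) override {
        occupied_sum += r->rset_occupied;
        std::printf("Region %u: RSet occupancy %zu\n", r->index, r->rset_occupied);
        return false;   // keep visiting every region
      }
    };
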
3366 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3367   YoungList* young_list = heap()->young_list();
3368 
3369   size_t eden_used_bytes = young_list->eden_used_bytes();
3370   size_t survivor_used_bytes = young_list->survivor_used_bytes();
3371 


3389 
3390   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3391   gc_tracer->report_metaspace_summary(when, metaspace_summary);
3392 }
3393 
3394 
3395 G1CollectedHeap* G1CollectedHeap::heap() {
3396   CollectedHeap* heap = Universe::heap();
3397   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3398   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3399   return (G1CollectedHeap*)heap;
3400 }
3401 
3402 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3403   // always_do_update_barrier = false;
3404   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3405   // Fill TLAB's and such
3406   accumulate_statistics_all_tlabs();
3407   ensure_parsability(true);
3408 
3409   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3410       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3411     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3412   }
3413 }
3414 
3415 void G1CollectedHeap::gc_epilogue(bool full) {
3416 
3417   if (G1SummarizeRSetStats &&
3418       (G1SummarizeRSetStatsPeriod > 0) &&
3419       // we are at the end of the GC. Total collections has already been increased.
3420       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3421     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3422   }
3423 
3424   // FIXME: what is this about?
3425   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3426   // is set.
3427 #if defined(COMPILER2) || INCLUDE_JVMCI
3428   assert(DerivedPointerTable::is_empty(), "derived pointer present");
3429 #endif
3430   // always_do_update_barrier = true;
3431 
3432   resize_all_tlabs();
3433   allocation_context_stats().update(full);
3434 
3435   // We have just completed a GC. Update the soft reference
3436   // policy with the new heap occupancy
3437   Universe::update_heap_info_at_gc();
3438 }
3439 
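Aside: gc_prologue() tests total_collections() % G1SummarizeRSetStatsPeriod == 0 while gc_epilogue() tests (total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0; the in-line comment explains the -1 by noting that the counter has already been incremented by the end of the GC. A tiny self-checking sketch of that off-by-one, assuming the counter is bumped between the two hooks as the comment indicates:

    #include <cassert>

    int main() {
      const unsigned period = 5;        // stands in for G1SummarizeRSetStatsPeriod
      unsigned total_collections = 0;   // stands in for total_collections()

      for (int gc = 0; gc < 100; ++gc) {
        bool before = (total_collections % period == 0);        // gc_prologue() check
        ++total_collections;                                     // counter is bumped during the GC
        bool after = ((total_collections - 1) % period == 0);    // gc_epilogue() check
        assert(before == after);  // the -1 makes both summaries fire for the same collections
      }
      return 0;
    }
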
3440 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3441                                                uint gc_count_before,
3442                                                bool* succeeded,


3648     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3649 
3650     // Here's a good place to add any other checks we'd like to
3651     // perform on CSet regions.
3652     return false;
3653   }
3654 };
3655 #endif // ASSERT
3656 
3657 uint G1CollectedHeap::num_task_queues() const {
3658   return _task_queues->size();
3659 }
3660 
3661 #if TASKQUEUE_STATS
3662 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3663   st->print_raw_cr("GC Task Stats");
3664   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3665   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3666 }
3667 
3668 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {

3669   print_taskqueue_stats_hdr(st);
3670 
3671   TaskQueueStats totals;
3672   const uint n = num_task_queues();
3673   for (uint i = 0; i < n; ++i) {
3674     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3675     totals += task_queue(i)->stats;
3676   }
3677   st->print_raw("tot "); totals.print(st); st->cr();
3678 
3679   DEBUG_ONLY(totals.verify());
3680 }
3681 
3682 void G1CollectedHeap::reset_taskqueue_stats() {
3683   const uint n = num_task_queues();
3684   for (uint i = 0; i < n; ++i) {
3685     task_queue(i)->stats.reset();
3686   }
3687 }
3688 #endif // TASKQUEUE_STATS
3689 
3690 void G1CollectedHeap::log_gc_header() {
3691   if (!G1Log::fine()) {
3692     return;
3693   }
3694 
3695   gclog_or_tty->gclog_stamp();
3696 
3697   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3698     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3699     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3700 
3701   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3702 }
3703 
3704 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3705   if (!G1Log::fine()) {
3706     return;
3707   }
3708 
3709   if (G1Log::finer()) {
3710     if (evacuation_failed()) {
3711       gclog_or_tty->print(" (to-space exhausted)");
3712     }
3713     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);

3714     g1_policy()->print_phases(pause_time_sec);

3715     g1_policy()->print_detailed_heap_transition();
3716   } else {
3717     if (evacuation_failed()) {
3718       gclog_or_tty->print("--");
3719     }
3720     g1_policy()->print_heap_transition();
3721     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3722   }
3723   gclog_or_tty->flush();
3724 }
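// Illustrative only: with G1Log at "fine", the header and footer above combine
// into a single log line roughly of the form
//   [GC pause (G1 Evacuation Pause) (young) 123M->45M(512M), 0.0123456 secs]
// where the heap transition comes from print_heap_transition() and the
// "(to-space exhausted)" / "--" markers appear only after an evacuation failure.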
3725 

3726 void G1CollectedHeap::wait_for_root_region_scanning() {
3727   double scan_wait_start = os::elapsedTime();
3728   // We have to wait until the CM threads finish scanning the
3729   // root regions as it's the only way to ensure that all the
3730   // objects on them have been correctly scanned before we start
3731   // moving them during the GC.
3732   bool waited = _cm->root_regions()->wait_until_scan_finished();
3733   double wait_time_ms = 0.0;
3734   if (waited) {
3735     double scan_wait_end = os::elapsedTime();
3736     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3737   }
3738   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3739 }
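// The wait time recorded above is reported through the pause's phase times
// (record_root_region_scan_wait_time), so time spent waiting for root region
// scanning shows up explicitly in the detailed GC log rather than silently
// inflating another phase.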
3740 
3741 bool
3742 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3743   assert_at_safepoint(true /* should_be_vm_thread */);
3744   guarantee(!is_gc_active(), "collection is not reentrant");
3745 
3746   if (GC_locker::check_active_before_gc()) {
3747     return false;
3748   }
3749 
3750   _gc_timer_stw->register_gc_start();
3751 
3752   GCIdMark gc_id_mark;
3753   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3754 
3755   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3756   ResourceMark rm;
3757 
3758   wait_for_root_region_scanning();
3759 
3760   G1Log::update_level();
3761   print_heap_before_gc();
3762   trace_heap_before_gc(_gc_tracer_stw);
3763 
3764   verify_region_sets_optional();
3765   verify_dirty_young_regions();
3766 
3767   // This call will decide whether this pause is an initial-mark
3768   // pause. If it is, during_initial_mark_pause() will return true
3769   // for the duration of this pause.
3770   g1_policy()->decide_on_conc_mark_initiation();
3771 
3772   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3773   assert(!collector_state()->during_initial_mark_pause() ||
3774           collector_state()->gcs_are_young(), "sanity");
3775 
3776   // We also do not allow mixed GCs during marking.
3777   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3778 
3779   // Record whether this pause is an initial mark. By the time the current
3780   // thread has completed its logging output and it is safe to signal
3781   // the CM thread, the flag's value in the policy will have been reset.
3782   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3783 
3784   // Inner scope for scope based logging, timers, and stats collection
3785   {
3786     EvacuationInfo evacuation_info;
3787 
3788     if (collector_state()->during_initial_mark_pause()) {
3789       // We are about to start a marking cycle, so we increment the
3790       // old marking cycles started counter.
3791       increment_old_marking_cycles_started();
3792       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3793     }
3794 
3795     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3796 
3797     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3798 
3799     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3800                                                                   workers()->active_workers(),
3801                                                                   Threads::number_of_non_daemon_threads());
3802     workers()->set_active_workers(active_workers);
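    // calc_active_workers() recomputes the worker count for this pause from the
    // configured maximum, the previous active count and the number of non-daemon
    // Java threads; with -XX:+UseDynamicNumberOfGCThreads (assumed here) a lightly
    // loaded VM can therefore run the pause with fewer GC threads.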









3803 
3804     double pause_start_sec = os::elapsedTime();

3805     g1_policy()->note_gc_start(active_workers);
3806     log_gc_header();
3807 
3808     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3809     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3810 
3811     // If the secondary_free_list is not empty, append it to the
3812     // free_list. No need to wait for the cleanup operation to finish;
3813     // the region allocation code will check the secondary_free_list
3814     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3815     // set, skip this step so that the region allocation code has to
3816     // get entries from the secondary_free_list.
3817     if (!G1StressConcRegionFreeing) {
3818       append_secondary_free_list_if_not_empty_with_lock();
3819     }
3820 
3821     assert(check_young_list_well_formed(), "young list should be well formed");
3822 
3823     // Don't dynamically change the number of GC threads this early.  A value of
3824     // 0 is used to indicate serial work.  When parallel work is done,
3825     // it will be set.
3826 


3844       // reference processing currently works in G1.
3845 
3846       // Enable discovery in the STW reference processor
3847       if (g1_policy()->should_process_references()) {
3848         ref_processor_stw()->enable_discovery();
3849       } else {
3850         ref_processor_stw()->disable_discovery();
3851       }
3852 
3853       {
3854         // We want to temporarily turn off discovery by the
3855         // CM ref processor, if necessary, and turn it back on
3856         // again later if we do. Using a scoped
3857         // NoRefDiscovery object will do this.
3858         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3859 
3860         // Forget the current alloc region (we might even choose it to be part
3861         // of the collection set!).
3862         _allocator->release_mutator_alloc_region();
3863 
3864         // We should call this after we retire the mutator alloc
3865         // region(s) so that all the ALLOC / RETIRE events are generated
3866         // before the start GC event.
3867         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3868 
3869         // This timing is only used by the ergonomics to handle our pause target.
3870         // It is unclear why this should not include the full pause. We will
3871         // investigate this in CR 7178365.
3872         //
3873         // Preserving the old comment here if that helps the investigation:
3874         //
3875         // The elapsed time induced by the start time below deliberately elides
3876         // the possible verification above.
3877         double sample_start_time_sec = os::elapsedTime();
3878 
3879         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3880 
3881         if (collector_state()->during_initial_mark_pause()) {
3882           concurrent_mark()->checkpointRootsInitialPre();
3883         }
3884 
3885         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3886         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3887 
3888         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());


3972 
3973         if (collector_state()->during_initial_mark_pause()) {
3974           // We have to do this before we notify the CM threads that
3975           // they can start working to make sure that all the
3976           // appropriate initialization is done on the CM object.
3977           concurrent_mark()->checkpointRootsInitialPost();
3978           collector_state()->set_mark_in_progress(true);
3979           // Note that we don't actually trigger the CM thread at
3980           // this point. We do that later when we're sure that
3981           // the current thread has completed its logging output.
3982         }
3983 
3984         allocate_dummy_regions();
3985 
3986         _allocator->init_mutator_alloc_region();
3987 
3988         {
3989           size_t expand_bytes = g1_policy()->expansion_amount();
3990           if (expand_bytes > 0) {
3991             size_t bytes_before = capacity();
3992             // No need for an ergo verbose message here,
3993             // expansion_amount() does this when it returns a value > 0.
3994             double expand_ms;
3995             if (!expand(expand_bytes, &expand_ms)) {
3996               // We failed to expand the heap. Cannot do anything about it.
3997             }
3998             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3999           }
4000         }
4001 
4002         // We redo the verification but now wrt to the new CSet which
4003         // has just got initialized after the previous CSet was freed.
4004         _cm->verify_no_cset_oops();
4005         _cm->note_end_of_gc();
4006 
4007         // This timing is only used by the ergonomics to handle our pause target.
4008         // It is unclear why this should not include the full pause. We will
4009         // investigate this in CR 7178365.
4010         double sample_end_time_sec = os::elapsedTime();
4011         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4012         size_t total_cards_scanned = per_thread_states.total_cards_scanned();


4032         // stamp here we invalidate all the GC time stamps on all the
4033         // regions and saved_mark_word() will simply return top() for
4034         // all the regions. This is a nicer way of ensuring this rather
4035         // than iterating over the regions and fixing them. In fact, the
4036         // GC time stamp increment here also ensures that
4037         // saved_mark_word() will return top() between pauses, i.e.,
4038         // during concurrent refinement. So we don't need the
4039         // is_gc_active() check to decide which top to use when
4040         // scanning cards (see CR 7039627).
4041         increment_gc_time_stamp();
4042 
4043         verify_after_gc();
4044         check_bitmaps("GC End");
4045 
4046         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4047         ref_processor_stw()->verify_no_references_recorded();
4048 
4049         // CM reference discovery will be re-enabled if necessary.
4050       }
4051 
4052       // We should do this after we potentially expand the heap so
4053       // that all the COMMIT events are generated before the end GC
4054       // event, and after we retire the GC alloc regions so that all
4055       // RETIRE events are generated before the end GC event.
4056       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4057 
4058 #ifdef TRACESPINNING
4059       ParallelTaskTerminator::print_termination_counts();
4060 #endif
4061 
4062       gc_epilogue(false);
4063     }
4064 
4065     // Print the remainder of the GC log output.
4066     log_gc_footer(os::elapsedTime() - pause_start_sec);
4067 
4068     // It is not yet safe to tell the concurrent mark to
4069     // start as we have some optional output below. We don't want the
4070     // output from the concurrent mark thread interfering with this
4071     // logging output either.
4072 
4073     _hrm.verify_optional();
4074     verify_region_sets_optional();
4075 
4076     TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4077     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4078 
4079     print_heap_after_gc();
4080     trace_heap_after_gc(_gc_tracer_stw);
4081 
4082     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4083     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4084     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4085     // before any GC notifications are raised.
4086     g1mm()->update_sizes();
4087 
4088     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4089     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4090     _gc_timer_stw->register_gc_end();
4091     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4092   }
4093   // It should now be safe to tell the concurrent mark thread to start
4094   // without its logging output interfering with the logging output
4095   // that came from the pause.
4096 


4211 
4212       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4213 
4214       double term_sec = 0.0;
4215       size_t evac_term_attempts = 0;
4216       {
4217         double start = os::elapsedTime();
4218         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4219         evac.do_void();
4220 
4221         evac_term_attempts = evac.term_attempts();
4222         term_sec = evac.term_time();
4223         double elapsed_sec = os::elapsedTime() - start;
4224         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4225         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4226         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4227       }
4228 
4229       assert(pss->queue_is_empty(), "should be empty");
4230 
4231       if (PrintTerminationStats) {
4232         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4233         size_t lab_waste;
4234         size_t lab_undo_waste;
4235         pss->waste(lab_waste, lab_undo_waste);
4236         _g1h->print_termination_stats(gclog_or_tty,
4237                                       worker_id,
4238                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
4239                                       strong_roots_sec * 1000.0,                  /* strong roots time */
4240                                       term_sec * 1000.0,                          /* evac term time */
4241                                       evac_term_attempts,                         /* evac term attempts */
4242                                       lab_waste,                                  /* alloc buffer waste */
4243                                       lab_undo_waste                              /* undo waste */
4244                                       );
4245       }
4246 
4247       // Close the inner scope so that the ResourceMark and HandleMark
4248       // destructors are executed here and are included as part of the
4249       // "GC Worker Time".
4250     }
4251     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4252   }
4253 };
4254 
4255 void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
4256   st->print_raw_cr("GC Termination Stats");
4257   st->print_raw_cr("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
4258   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts  total   alloc    undo");
4259   st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4260 }
4261 
4262 void G1CollectedHeap::print_termination_stats(outputStream* const st,
4263                                               uint worker_id,
4264                                               double elapsed_ms,
4265                                               double strong_roots_ms,
4266                                               double term_ms,
4267                                               size_t term_attempts,
4268                                               size_t alloc_buffer_waste,
4269                                               size_t undo_waste) const {
4270   st->print_cr("%3d %9.2f %9.2f %6.2f "

4271                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4272                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4273                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4274                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4275                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4276                alloc_buffer_waste * HeapWordSize / K,
4277                undo_waste * HeapWordSize / K);
4278 }
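// Illustrative only (the numbers below are made up): together with
// print_termination_stats_hdr() the per-worker lines form a table such as
//   GC Termination Stats
//        elapsed  --strong roots-- -------termination------- ------waste (KiB)------
//   thr     ms        ms      %        ms      %    attempts  total   alloc    undo
//   --- --------- --------- ------ --------- ------ -------- ------- ------- -------
//     0     12.34      3.21  26.01      0.45   3.65        7      12      10       2
// following the format strings above.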
4279 
4280 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4281 private:
4282   BoolObjectClosure* _is_alive;
4283   int _initial_string_table_size;
4284   int _initial_symbol_table_size;
4285 
4286   bool  _process_strings;
4287   int _strings_processed;
4288   int _strings_removed;
4289 
4290   bool  _process_symbols;


4299     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4300 
4301     _initial_string_table_size = StringTable::the_table()->table_size();
4302     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4303     if (process_strings) {
4304       StringTable::clear_parallel_claimed_index();
4305     }
4306     if (process_symbols) {
4307       SymbolTable::clear_parallel_claimed_index();
4308     }
4309   }
4310 
4311   ~G1StringSymbolTableUnlinkTask() {
4312     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4313               "claim value %d after unlink less than initial string table size %d",
4314               StringTable::parallel_claimed_index(), _initial_string_table_size);
4315     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4316               "claim value %d after unlink less than initial symbol table size %d",
4317               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4318 
4319     if (G1TraceStringSymbolTableScrubbing) {
4320       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4321                              "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4322                              "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4323                              strings_processed(), strings_removed(),
4324                              symbols_processed(), symbols_removed());
4325     }
4326   }
4327 
4328   void work(uint worker_id) {
4329     int strings_processed = 0;
4330     int strings_removed = 0;
4331     int symbols_processed = 0;
4332     int symbols_removed = 0;
4333     if (_process_strings) {
4334       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4335       Atomic::add(strings_processed, &_strings_processed);
4336       Atomic::add(strings_removed, &_strings_removed);
4337     }
4338     if (_process_symbols) {
4339       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4340       Atomic::add(symbols_processed, &_symbols_processed);
4341       Atomic::add(symbols_removed, &_symbols_removed);
4342     }
4343   }
4344 
4345   size_t strings_processed() const { return (size_t)_strings_processed; }
4346   size_t strings_removed()   const { return (size_t)_strings_removed; }


5145   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5146 }
5147 
5148 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5149   // Should G1EvacuationFailureALot be in effect for this GC?
5150   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5151 
5152   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5153   double start_par_time_sec = os::elapsedTime();
5154   double end_par_time_sec;
5155 
5156   {
5157     const uint n_workers = workers()->active_workers();
5158     G1RootProcessor root_processor(this, n_workers);
5159     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5160     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5161     if (collector_state()->during_initial_mark_pause()) {
5162       ClassLoaderDataGraph::clear_claimed_marks();
5163     }
5164 
5165     // The individual threads will set their evac-failure closures.
5166     if (PrintTerminationStats) {
5167       print_termination_stats_hdr(gclog_or_tty);
5168     }
5169 
5170     workers()->run_task(&g1_par_task);
5171     end_par_time_sec = os::elapsedTime();
5172 
5173     // Closing the inner scope will execute the destructor
5174     // for the G1RootProcessor object. We record the current
5175     // elapsed time before closing the scope so that time
5176     // taken for the destructor is NOT included in the
5177     // reported parallel time.
5178   }
5179 
5180   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5181 
5182   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5183   phase_times->record_par_time(par_time_ms);
5184 
5185   double code_root_fixup_time_ms =
5186         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5187   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5188 }


5387   }
5388 }
5389 
5390 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5391   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5392   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5393     verify_dirty_region(hr);
5394   }
5395 }
5396 
5397 void G1CollectedHeap::verify_dirty_young_regions() {
5398   verify_dirty_young_list(_young_list->first_region());
5399 }
5400 
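// Verifies that the given bitmap has no bit set in [tams, end): objects
// allocated at or above a region's top-at-mark-start (TAMS) are implicitly
// live for that marking and must not also carry an explicit mark.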
5401 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5402                                                HeapWord* tams, HeapWord* end) {
5403   guarantee(tams <= end,
5404             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5405   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5406   if (result < end) {
5407     gclog_or_tty->cr();
5408     gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5409                            bitmap_name, p2i(result));
5410     gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5411                            bitmap_name, p2i(tams), p2i(end));
5412     return false;
5413   }
5414   return true;
5415 }
5416 
5417 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5418   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5419   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5420 
5421   HeapWord* bottom = hr->bottom();
5422   HeapWord* ptams  = hr->prev_top_at_mark_start();
5423   HeapWord* ntams  = hr->next_top_at_mark_start();
5424   HeapWord* end    = hr->end();
5425 
5426   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5427 
5428   bool res_n = true;
5429   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5430   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5431   // if we happen to be in that state.
5432   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5433     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5434   }
5435   if (!res_p || !res_n) {
5436     gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
5437                            HR_FORMAT_PARAMS(hr));
5438     gclog_or_tty->print_cr("#### Caller: %s", caller);
5439     return false;
5440   }
5441   return true;
5442 }
5443 
5444 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5445   if (!G1VerifyBitmaps) return;
5446 
5447   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5448 }
5449 
5450 class G1VerifyBitmapClosure : public HeapRegionClosure {
5451 private:
5452   const char* _caller;
5453   G1CollectedHeap* _g1h;
5454   bool _failures;
5455 
5456 public:
5457   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5458     _caller(caller), _g1h(g1h), _failures(false) { }


5470 
5471 void G1CollectedHeap::check_bitmaps(const char* caller) {
5472   if (!G1VerifyBitmaps) return;
5473 
5474   G1VerifyBitmapClosure cl(caller, this);
5475   heap_region_iterate(&cl);
5476   guarantee(!cl.failures(), "bitmap verification");
5477 }
5478 
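// Checks, for every region, that the entry in the in-cset fast-test table is
// consistent with the region's own state: humongous regions must not be in the
// collection set, and for other regions the table's in-cset / young / old
// information must match the corresponding HeapRegion flags.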
5479 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5480  private:
5481   bool _failures;
5482  public:
5483   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5484 
5485   virtual bool doHeapRegion(HeapRegion* hr) {
5486     uint i = hr->hrm_index();
5487     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5488     if (hr->is_humongous()) {
5489       if (hr->in_collection_set()) {
5490         gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
5491         _failures = true;
5492         return true;
5493       }
5494       if (cset_state.is_in_cset()) {
5495         gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5496         _failures = true;
5497         return true;
5498       }
5499       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5500         gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5501         _failures = true;
5502         return true;
5503       }
5504     } else {
5505       if (cset_state.is_humongous()) {
5506         gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5507         _failures = true;
5508         return true;
5509       }
5510       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5511         gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
5512                                hr->in_collection_set(), cset_state.value(), i);
5513         _failures = true;
5514         return true;
5515       }
5516       if (cset_state.is_in_cset()) {
5517         if (hr->is_young() != (cset_state.is_young())) {
5518           gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
5519                                  hr->is_young(), cset_state.value(), i);
5520           _failures = true;
5521           return true;
5522         }
5523         if (hr->is_old() != (cset_state.is_old())) {
5524           gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
5525                                  hr->is_old(), cset_state.value(), i);
5526           _failures = true;
5527           return true;
5528         }
5529       }
5530     }
5531     return false;
5532   }
5533 
5534   bool failures() const { return _failures; }
5535 };
5536 
5537 bool G1CollectedHeap::check_cset_fast_test() {
5538   G1CheckCSetFastTableClosure cl;
5539   _hrm.iterate(&cl);
5540   return !cl.failures();
5541 }
5542 #endif // PRODUCT
5543 
5544 void G1CollectedHeap::cleanUpCardTable() {


5722     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5723     // until the end of a concurrent mark.
5724     //
5725     // It is not required to check whether the object has been found dead by marking
5726     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5727     // all objects allocated during that time are considered live.
5728     // SATB marking is even more conservative than the remembered set.
5729     // So if at this point in the collection there is no remembered set entry,
5730     // nobody has a reference to it.
5731     // At the start of collection we flush all refinement logs, and remembered sets
5732     // are completely up-to-date wrt references to the humongous object.
5733     //
5734     // Other implementation considerations:
5735     // - never consider object arrays at this time because they would pose
5736     // considerable effort for cleaning up the remembered sets. This is
5737     // required because stale remembered sets might reference locations that
5738     // are currently allocated into.
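    // In short: only humongous regions that were flagged as reclaim candidates at
    // the start of the pause and whose remembered set is still empty are reclaimed
    // here; everything else is treated as live for this pause.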
5739     uint region_idx = r->hrm_index();
5740     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5741         !r->rem_set()->is_empty()) {
5742 
5743       if (G1TraceEagerReclaimHumongousObjects) {
5744         gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5745                                region_idx,
5746                                (size_t)obj->size() * HeapWordSize,
5747                                p2i(r->bottom()),
5748                                r->rem_set()->occupied(),
5749                                r->rem_set()->strong_code_roots_list_length(),
5750                                next_bitmap->isMarked(r->bottom()),
5751                                g1h->is_humongous_reclaim_candidate(region_idx),
5752                                obj->is_typeArray()
5753                               );
5754       }
5755 
5756       return false;
5757     }
5758 
5759     guarantee(obj->is_typeArray(),
5760               "Only eagerly reclaiming type arrays is supported, but the object "
5761               PTR_FORMAT " is not.", p2i(r->bottom()));
5762 
5763     if (G1TraceEagerReclaimHumongousObjects) {
5764       gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5765                              region_idx,
5766                              (size_t)obj->size() * HeapWordSize,
5767                              p2i(r->bottom()),
5768                              r->rem_set()->occupied(),
5769                              r->rem_set()->strong_code_roots_list_length(),
5770                              next_bitmap->isMarked(r->bottom()),
5771                              g1h->is_humongous_reclaim_candidate(region_idx),
5772                              obj->is_typeArray()
5773                             );
5774     }
5775     // Need to clear mark bit of the humongous object if already set.
5776     if (next_bitmap->isMarked(r->bottom())) {
5777       next_bitmap->clear(r->bottom());
5778     }
5779     do {
5780       HeapRegion* next = g1h->next_region_in_humongous(r);
5781       _freed_bytes += r->used();
5782       r->set_containing_set(NULL);
5783       _humongous_regions_removed.increment(1u, r->capacity());
5784       g1h->free_humongous_region(r, _free_region_list, false);
5785       r = next;
5786     } while (r != NULL);
5787 
5788     return false;
5789   }
5790 
5791   HeapRegionSetCount& humongous_free_count() {
5792     return _humongous_regions_removed;
5793   }
5794 
5795   size_t bytes_freed() const {
5796     return _freed_bytes;
5797   }
5798 
5799   size_t humongous_reclaimed() const {
5800     return _humongous_regions_removed.length();
5801   }
5802 };
5803 
5804 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5805   assert_at_safepoint(true);
5806 
5807   if (!G1EagerReclaimHumongousObjects ||
5808       (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
5809     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5810     return;
5811   }
5812 
5813   double start_time = os::elapsedTime();
5814 
5815   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5816 
5817   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5818   heap_region_iterate(&cl);
5819 
5820   HeapRegionSetCount empty_set;
5821   remove_from_old_sets(empty_set, cl.humongous_free_count());
5822 
5823   G1HRPrinter* hrp = hr_printer();
5824   if (hrp->is_active()) {
5825     FreeRegionListIterator iter(&local_cleanup_list);
5826     while (iter.more_available()) {
5827       HeapRegion* hr = iter.get_next();
5828       hrp->cleanup(hr);


5841 // the current incremental collection set in preparation for a full collection.
5842 // full collection. After the full GC we will start to build up
5843 // the incremental collection set again.
5844 // This is only called when we're doing a full collection
5845 // and is immediately followed by the tearing down of the young list.
5846 
5847 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5848   HeapRegion* cur = cs_head;
5849 
5850   while (cur != NULL) {
5851     HeapRegion* next = cur->next_in_collection_set();
5852     assert(cur->in_collection_set(), "bad CS");
5853     cur->set_next_in_collection_set(NULL);
5854     clear_in_cset(cur);
5855     cur->set_young_index_in_cset(-1);
5856     cur = next;
5857   }
5858 }
5859 
5860 void G1CollectedHeap::set_free_regions_coming() {
5861   if (G1ConcRegionFreeingVerbose) {
5862     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5863                            "setting free regions coming");
5864   }
5865 
5866   assert(!free_regions_coming(), "pre-condition");
5867   _free_regions_coming = true;
5868 }
5869 
5870 void G1CollectedHeap::reset_free_regions_coming() {
5871   assert(free_regions_coming(), "pre-condition");
5872 
5873   {
5874     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5875     _free_regions_coming = false;
5876     SecondaryFreeList_lock->notify_all();
5877   }
5878 
5879   if (G1ConcRegionFreeingVerbose) {
5880     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5881                            "reset free regions coming");
5882   }
5883 }
5884 
5885 void G1CollectedHeap::wait_while_free_regions_coming() {
5886   // Most of the time we won't have to wait, so let's do a quick test
5887   // first before we take the lock.
5888   if (!free_regions_coming()) {
5889     return;
5890   }
5891 
5892   if (G1ConcRegionFreeingVerbose) {
5893     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5894                            "waiting for free regions");
5895   }
5896 
5897   {
5898     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5899     while (free_regions_coming()) {
5900       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5901     }
5902   }
5903 
5904   if (G1ConcRegionFreeingVerbose) {
5905     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5906                            "done waiting for free regions");
5907   }
5908 }
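// The three methods above form a small handshake around _free_regions_coming:
// the concurrent cleanup path sets the flag before it starts moving freed regions
// onto the secondary free list and clears it (notifying on SecondaryFreeList_lock)
// once it is done, while callers that need a stable free list wait for the flag
// to drop before proceeding.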
5909 
5910 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5911   return _allocator->is_retained_old_region(hr);
5912 }
5913 
5914 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5915   _young_list->push_region(hr);
5916 }
5917 
5918 class NoYoungRegionsClosure: public HeapRegionClosure {
5919 private:
5920   bool _success;
5921 public:
5922   NoYoungRegionsClosure() : _success(true) { }
5923   bool doHeapRegion(HeapRegion* r) {
5924     if (r->is_young()) {
5925       gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5926                              p2i(r->bottom()), p2i(r->end()));
5927       _success = false;
5928     }
5929     return false;
5930   }
5931   bool success() { return _success; }
5932 };
5933 
5934 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5935   bool ret = _young_list->check_list_empty(check_sample);
5936 
5937   if (check_heap) {
5938     NoYoungRegionsClosure closure;
5939     heap_region_iterate(&closure);
5940     ret = ret && closure.success();
5941   }
5942 
5943   return ret;
5944 }
5945 


6156 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6157                                              size_t allocated_bytes,
6158                                              InCSetState dest) {
6159   bool during_im = collector_state()->during_initial_mark_pause();
6160   alloc_region->note_end_of_copying(during_im);
6161   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6162   if (dest.is_young()) {
6163     young_list()->add_survivor_region(alloc_region);
6164   } else {
6165     _old_set.add(alloc_region);
6166   }
6167   _hr_printer.retire(alloc_region);
6168 }
6169 
6170 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6171   bool expanded = false;
6172   uint index = _hrm.find_highest_free(&expanded);
6173 
6174   if (index != G1_NO_HRM_INDEX) {
6175     if (expanded) {
6176       ergo_verbose1(ErgoHeapSizing,
6177                     "attempt heap expansion",
6178                     ergo_format_reason("requested address range outside heap bounds")
6179                     ergo_format_byte("region size"),
6180                     HeapRegion::GrainWords * HeapWordSize);
6181     }
6182     _hrm.allocate_free_regions_starting_at(index, 1);
6183     return region_at(index);
6184   }
6185   return NULL;
6186 }
6187 
6188 // Heap region set verification
6189 
6190 class VerifyRegionListsClosure : public HeapRegionClosure {
6191 private:
6192   HeapRegionSet*   _old_set;
6193   HeapRegionSet*   _humongous_set;
6194   HeapRegionManager*   _hrm;
6195 
6196 public:
6197   HeapRegionSetCount _old_count;
6198   HeapRegionSetCount _humongous_count;
6199   HeapRegionSetCount _free_count;




  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "gc/g1/bufferingOopClosure.hpp"
  31 #include "gc/g1/concurrentG1Refine.hpp"
  32 #include "gc/g1/concurrentG1RefineThread.hpp"
  33 #include "gc/g1/concurrentMarkThread.inline.hpp"
  34 #include "gc/g1/g1Allocator.inline.hpp"
  35 #include "gc/g1/g1CollectedHeap.inline.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"

  38 #include "gc/g1/g1EvacStats.inline.hpp"
  39 #include "gc/g1/g1GCPhaseTimes.hpp"

  40 #include "gc/g1/g1MarkSweep.hpp"
  41 #include "gc/g1/g1OopClosures.inline.hpp"
  42 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  43 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  44 #include "gc/g1/g1RemSet.inline.hpp"
  45 #include "gc/g1/g1RootClosures.hpp"
  46 #include "gc/g1/g1RootProcessor.hpp"
  47 #include "gc/g1/g1StringDedup.hpp"
  48 #include "gc/g1/g1YCTypes.hpp"
  49 #include "gc/g1/heapRegion.inline.hpp"
  50 #include "gc/g1/heapRegionRemSet.hpp"
  51 #include "gc/g1/heapRegionSet.inline.hpp"
  52 #include "gc/g1/suspendibleThreadSet.hpp"
  53 #include "gc/g1/vm_operations_g1.hpp"
  54 #include "gc/shared/gcHeapSummary.hpp"
  55 #include "gc/shared/gcId.hpp"
  56 #include "gc/shared/gcLocker.inline.hpp"
  57 #include "gc/shared/gcTimer.hpp"
  58 #include "gc/shared/gcTrace.hpp"
  59 #include "gc/shared/gcTraceTime.inline.hpp"
  60 #include "gc/shared/generationSpec.hpp"
  61 #include "gc/shared/isGCActiveMark.hpp"
  62 #include "gc/shared/referenceProcessor.hpp"
  63 #include "gc/shared/taskqueue.inline.hpp"
  64 #include "logging/log.hpp"
  65 #include "memory/allocation.hpp"
  66 #include "memory/iterator.hpp"
  67 #include "oops/oop.inline.hpp"
  68 #include "runtime/atomic.inline.hpp"
  69 #include "runtime/init.hpp"
  70 #include "runtime/orderAccess.inline.hpp"
  71 #include "runtime/vmThread.hpp"
  72 #include "utilities/globalDefinitions.hpp"
  73 #include "utilities/stack.inline.hpp"
  74 
  75 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  76 
  77 // INVARIANTS/NOTES
  78 //
  79 // All allocation activity covered by the G1CollectedHeap interface is
  80 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  81 // and allocate_new_tlab, which are the "entry" points to the
  82 // allocation code from the rest of the JVM.  (Note that this does not
  83 // apply to TLAB allocation, which is not part of this interface: it
  84 // is done by clients of this interface.)


 205   } while (hr != head);
 206   assert(hr != NULL, "invariant");
 207   hr->set_next_dirty_cards_region(NULL);
 208   return hr;
 209 }
 210 
 211 // Returns true if the reference points to an object that
 212 // can move in an incremental collection.
 213 bool G1CollectedHeap::is_scavengable(const void* p) {
 214   HeapRegion* hr = heap_region_containing(p);
 215   return !hr->is_pinned();
 216 }
 217 
 218 // Private methods.
 219 
 220 HeapRegion*
 221 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 222   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 223   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 224     if (!_secondary_free_list.is_empty()) {
 225       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 226                                       "secondary_free_list has %u entries",
 227                                       _secondary_free_list.length());

 228       // It looks as if there are free regions available on the
 229       // secondary_free_list. Let's move them to the free_list and try
 230       // again to allocate from it.
 231       append_secondary_free_list();
 232 
 233       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 234              "empty we should have moved at least one entry to the free_list");
 235       HeapRegion* res = _hrm.allocate_free_region(is_old);
 236       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 237                                       "allocated " HR_FORMAT " from secondary_free_list",
 238                                       HR_FORMAT_PARAMS(res));

 239       return res;
 240     }
 241 
 242     // Wait here until we get notified either when (a) there are no
 243     // more free regions coming or (b) some regions have been moved on
 244     // the secondary_free_list.
 245     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 246   }
 247 
 248   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 249                                   "could not allocate from secondary_free_list");

 250   return NULL;
 251 }
 252 
 253 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 254   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 255          "the only time we use this to allocate a humongous region is "
 256          "when we are allocating a single humongous region");
 257 
 258   HeapRegion* res;
 259   if (G1StressConcRegionFreeing) {
 260     if (!_secondary_free_list.is_empty()) {
 261       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 262                                       "forced to look at the secondary_free_list");

 263       res = new_region_try_secondary_free_list(is_old);
 264       if (res != NULL) {
 265         return res;
 266       }
 267     }
 268   }
 269 
 270   res = _hrm.allocate_free_region(is_old);
 271 
 272   if (res == NULL) {
 273     log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 274                                     "res == NULL, trying the secondary_free_list");

 275     res = new_region_try_secondary_free_list(is_old);
 276   }
 277   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 278     // Currently, only attempts to allocate GC alloc regions set
 279     // do_expand to true. So, we should only reach here during a
 280     // safepoint. If this assumption changes we might have to
 281     // reconsider the use of _expand_heap_after_alloc_failure.
 282     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 283 
 284     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",



 285                               word_size * HeapWordSize);
 286 
 287     if (expand(word_size * HeapWordSize)) {
 288       // Given that expand() succeeded in expanding the heap, and we
 289       // always expand the heap by an amount aligned to the heap
 290       // region size, the free list should in theory not be empty.
 291       // In either case allocate_free_region() will check for NULL.
 292       res = _hrm.allocate_free_region(is_old);
 293     } else {
 294       _expand_heap_after_alloc_failure = false;
 295     }
 296   }
 297   return res;
 298 }
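// The freelist trace messages above now go through log_develop_trace(gc, freelist);
// log_develop_* output is compiled in only for non-product builds, where it would
// presumably be enabled with something like
//   -Xlog:gc+freelist=trace
// (illustrative option; see the unified logging documentation for the exact syntax).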
 299 
 300 HeapWord*
 301 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 302                                                            uint num_regions,
 303                                                            size_t word_size,
 304                                                            AllocationContext_t context) {
 305   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 306   assert(is_humongous(word_size), "word_size should be humongous");


 454     // potentially waits for regions from the secondary free list.
 455     wait_while_free_regions_coming();
 456     append_secondary_free_list_if_not_empty_with_lock();
 457 
 458     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 459     // are lucky enough to find some.
 460     first = _hrm.find_contiguous_only_empty(obj_regions);
 461     if (first != G1_NO_HRM_INDEX) {
 462       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 463     }
 464   }
 465 
 466   if (first == G1_NO_HRM_INDEX) {
 467     // Policy: We could not find enough regions for the humongous object in the
 468     // free list. Look through the heap to find a mix of free and uncommitted regions.
 469     // If such a range exists, try expansion.
 470     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 471     if (first != G1_NO_HRM_INDEX) {
 472       // We found something. Make sure these regions are committed, i.e. expand
 473       // the heap. Alternatively we could do a defragmentation GC.
 474       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",



 475                                     word_size * HeapWordSize);
 476 
 477 
 478       _hrm.expand_at(first, obj_regions);
 479       g1_policy()->record_new_heap_size(num_regions());
 480 
 481 #ifdef ASSERT
 482       for (uint i = first; i < first + obj_regions; ++i) {
 483         HeapRegion* hr = region_at(i);
 484         assert(hr->is_free(), "sanity");
 485         assert(hr->is_empty(), "sanity");
 486         assert(is_on_master_free_list(hr), "sanity");
 487       }
 488 #endif
 489       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 490     } else {
 491       // Policy: Potentially trigger a defragmentation GC.
 492     }
 493   }
 494 
 495   HeapWord* result = NULL;
 496   if (first != G1_NO_HRM_INDEX) {
 497     result = humongous_obj_allocate_initialize_regions(first, obj_regions,


 775     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 776     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 777       start_address = start_region->end();
 778       if (start_address > last_address) {
 779         increase_used(word_size * HeapWordSize);
 780         start_region->set_top(last_address + 1);
 781         continue;
 782       }
 783       start_region->set_top(start_address);
 784       curr_range = MemRegion(start_address, last_address + 1);
 785       start_region = _hrm.addr_to_region(start_address);
 786     }
 787 
 788     // Perform the actual region allocation, exiting if it fails.
 789     // Then note how much new space we have allocated.
 790     if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
 791       return false;
 792     }
 793     increase_used(word_size * HeapWordSize);
 794     if (commits != 0) {
 795       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",



 796                                 HeapRegion::GrainWords * HeapWordSize * commits);
 797 
 798     }
 799 
 800     // Mark each G1 region touched by the range as archive, add it to the old set,
 801     // and set the allocation context and top.
 802     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 803     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 804     prev_last_region = last_region;
 805 
 806     while (curr_region != NULL) {
 807       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 808              "Region already in use (index %u)", curr_region->hrm_index());
 809       _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
 810       curr_region->set_allocation_context(AllocationContext::system());
 811       curr_region->set_archive();
 812       _old_set.add(curr_region);
 813       if (curr_region != last_region) {
 814         curr_region->set_top(curr_region->end());
 815         curr_region = _hrm.next_region_in_heap(curr_region);
 816       } else {
 817         curr_region->set_top(last_address + 1);


 958       guarantee(curr_region->is_archive(),
 959                 "Expected archive region at index %u", curr_region->hrm_index());
 960       uint curr_index = curr_region->hrm_index();
 961       _old_set.remove(curr_region);
 962       curr_region->set_free();
 963       curr_region->set_top(curr_region->bottom());
 964       if (curr_region != last_region) {
 965         curr_region = _hrm.next_region_in_heap(curr_region);
 966       } else {
 967         curr_region = NULL;
 968       }
 969       _hrm.shrink_at(curr_index, 1);
 970       uncommitted_regions++;
 971     }
 972 
 973     // Notify mark-sweep that this is no longer an archive range.
 974     G1MarkSweep::set_range_archive(ranges[i], false);
 975   }
 976 
 977   if (uncommitted_regions != 0) {
 978     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",



 979                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 980   }
 981   decrease_used(size_used);
 982 }
 983 
 984 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 985                                                         uint* gc_count_before_ret,
 986                                                         uint* gclocker_retry_count_ret) {
 987   // The structure of this method has a lot of similarities to
 988   // attempt_allocation_slow(). The reason these two were not merged
 989   // into a single one is that such a method would require several "if
 990   // allocation is not humongous do this, otherwise do that"
 991   // conditional paths which would obscure its flow. In fact, an early
 992   // version of this code did use a unified method which was harder to
 993   // follow and, as a result, it had subtle bugs that were hard to
 994   // track down. So keeping these two methods separate allows each to
 995   // be more readable. It will be good to keep these two in sync as
 996   // much as possible.
 997 
 998   assert_heap_not_locked_and_not_at_safepoint();


1198       // We only generate output for non-empty regions.
1199     } else if (hr->is_starts_humongous()) {
1200       _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1201     } else if (hr->is_continues_humongous()) {
1202       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1203     } else if (hr->is_archive()) {
1204       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1205     } else if (hr->is_old()) {
1206       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1207     } else {
1208       ShouldNotReachHere();
1209     }
1210     return false;
1211   }
1212 
1213   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1214     : _hr_printer(hr_printer) { }
1215 };
1216 
1217 void G1CollectedHeap::print_hrm_post_compaction() {
1218   if (_hr_printer.is_active()) {
1219     PostCompactionPrinterClosure cl(hr_printer());
1220     heap_region_iterate(&cl);
1221   }
1222 
1223 }
1224 
1225 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1226                                          bool clear_all_soft_refs) {
1227   assert_at_safepoint(true /* should_be_vm_thread */);
1228 
1229   if (GC_locker::check_active_before_gc()) {
1230     return false;
1231   }
1232 
1233   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1234   gc_timer->register_gc_start();
1235 
1236   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1237   GCIdMark gc_id_mark;
1238   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1239 
1240   SvcGCMarker sgcm(SvcGCMarker::FULL);
1241   ResourceMark rm;
1242 

1243   print_heap_before_gc();
1244   trace_heap_before_gc(gc_tracer);
1245 
1246   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1247 
1248   verify_region_sets_optional();
1249 
1250   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1251                            collector_policy()->should_clear_all_soft_refs();
1252 
1253   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1254 
1255   {
1256     IsGCActiveMark x;
1257 
1258     // Timing
1259     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1260     GCTraceCPUTime tcpu;
1261 
1262     {
1263       GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1264       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1265       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1266 
1267       g1_policy()->record_full_collection_start();
1268 
1269       // Note: When we have a more flexible GC logging framework that
1270       // allows us to add optional attributes to a GC log record we
1271       // could consider timing and reporting how long we wait in the
1272       // following two methods.
1273       wait_while_free_regions_coming();
1274       // If we start the compaction before the CM threads finish
1275       // scanning the root regions we might trip them over as we'll
1276       // be moving objects / updating references. So let's wait until
1277       // they are done. By telling them to abort, they should complete
1278       // early.
1279       _cm->root_regions()->abort();
1280       _cm->root_regions()->wait_until_scan_finished();
1281       append_secondary_free_list_if_not_empty_with_lock();
1282 
1283       gc_prologue(true);


1294 #if defined(COMPILER2) || INCLUDE_JVMCI
1295       DerivedPointerTable::clear();
1296 #endif
1297 
1298       // Disable discovery and empty the discovered lists
1299       // for the CM ref processor.
1300       ref_processor_cm()->disable_discovery();
1301       ref_processor_cm()->abandon_partial_discovery();
1302       ref_processor_cm()->verify_no_references_recorded();
1303 
1304       // Abandon current iterations of concurrent marking and concurrent
1305       // refinement, if any are in progress. We have to do this before
1306       // wait_until_scan_finished() below.
1307       concurrent_mark()->abort();
1308 
1309       // Make sure we'll choose a new allocation region afterwards.
1310       _allocator->release_mutator_alloc_region();
1311       _allocator->abandon_gc_alloc_regions();
1312       g1_rem_set()->cleanupHRRS();
1313 





1314       // We may have added regions to the current incremental collection
1315       // set between the last GC or pause and now. We need to clear the
1316       // incremental collection set and then start rebuilding it afresh
1317       // after this full GC.
1318       abandon_collection_set(g1_policy()->inc_cset_head());
1319       g1_policy()->clear_incremental_cset();
1320       g1_policy()->stop_incremental_cset_building();
1321 
1322       tear_down_region_sets(false /* free_list_only */);
1323       collector_state()->set_gcs_are_young(true);
1324 
1325       // See the comments in g1CollectedHeap.hpp and
1326       // G1CollectedHeap::ref_processing_init() about
1327       // how reference processing currently works in G1.
1328 
1329       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1330       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1331 
1332       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1333       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);


1360       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1361       ClassLoaderDataGraph::purge();
1362       MetaspaceAux::verify_metrics();
1363 
1364       // Note: since we've just done a full GC, concurrent
1365       // marking is no longer active. Therefore we need not
1366       // re-enable reference discovery for the CM ref processor.
1367       // That will be done at the start of the next marking cycle.
1368       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1369       ref_processor_cm()->verify_no_references_recorded();
1370 
1371       reset_gc_time_stamp();
1372       // Since everything potentially moved, we will clear all remembered
1373       // sets, and clear all cards.  Later we will rebuild remembered
1374       // sets. We will also reset the GC time stamps of the regions.
1375       clear_rsets_post_compaction();
1376       check_gc_time_stamps();
1377 
1378       resize_if_necessary_after_full_collection();
1379 

1380       // We should do this after we potentially resize the heap so
1381       // that all the COMMIT / UNCOMMIT events are generated before
1382       // the compaction events.

1383       print_hrm_post_compaction();


1384 
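           // The compaction has moved objects, so any cached hot cards and
           // per-card counts refer to stale locations; discard them.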
1385       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1386       if (hot_card_cache->use_cache()) {
1387         hot_card_cache->reset_card_counts();
1388         hot_card_cache->reset_hot_cache();
1389       }
1390 
1391       // Rebuild remembered sets of all regions.
1392       uint n_workers =
1393         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1394                                                 workers()->active_workers(),
1395                                                 Threads::number_of_non_daemon_threads());
1396       workers()->set_active_workers(n_workers);
1397 
1398       ParRebuildRSTask rebuild_rs_task(this);
1399       workers()->run_task(&rebuild_rs_task);
1400 
1401       // Rebuild the strong code root lists for each region
1402       rebuild_strong_code_roots();
1403 


1432       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1433       // objects marked during a full GC against the previous bitmap.
1434       // But we need to clear it before calling check_bitmaps below since
1435       // the full GC has compacted objects and updated TAMS but not updated
1436       // the prev bitmap.
1437       if (G1VerifyBitmaps) {
1438         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1439       }
1440       check_bitmaps("Full GC End");
1441 
1442       // Start a new incremental collection set for the next pause
1443       assert(g1_policy()->collection_set() == NULL, "must be");
1444       g1_policy()->start_incremental_cset_building();
1445 
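           // Reset the in-cset fast test table; no regions are in a collection
           // set immediately after a full GC.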
1446       clear_cset_fast_test();
1447 
1448       _allocator->init_mutator_alloc_region();
1449 
1450       g1_policy()->record_full_collection_end();
1451 




1452       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1453       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1454       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1455       // before any GC notifications are raised.
1456       g1mm()->update_sizes();
1457 
1458       gc_epilogue(true);
1459     }
1460 
1461     g1_policy()->print_detailed_heap_transition();


1462 
1463     print_heap_after_gc();
1464     trace_heap_after_gc(gc_tracer);
1465 
1466     post_full_gc_dump(gc_timer);
1467 
1468     gc_timer->register_gc_end();
1469     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1470   }
1471 
1472   return true;
1473 }
1474 
1475 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1476   // Currently, there is no facility in the do_full_collection(bool) API to notify
1477   // the caller that the collection did not succeed (e.g., because it was locked
1478   // out by the GC locker). So, right now, we'll ignore the return value.
1479   bool dummy = do_full_collection(true,                /* explicit_gc */
1480                                   clear_all_soft_refs);
1481 }


1519 
1520   // This assert only makes sense here, before we adjust them
1521   // with respect to the min and max heap size.
1522   assert(minimum_desired_capacity <= maximum_desired_capacity,
1523          "minimum_desired_capacity = " SIZE_FORMAT ", "
1524          "maximum_desired_capacity = " SIZE_FORMAT,
1525          minimum_desired_capacity, maximum_desired_capacity);
1526 
1527   // Should not be greater than the heap max size. No need to adjust
1528   // it with respect to the heap min size as it's a lower bound (i.e.,
1529   // we'll try to make the capacity larger than it, not smaller).
1530   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1531   // Should not be less than the heap min size. No need to adjust it
1532   // with respect to the heap max size as it's an upper bound (i.e.,
1533   // we'll try to make the capacity smaller than it, not greater).
1534   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1535 
1536   if (capacity_after_gc < minimum_desired_capacity) {
1537     // Don't expand unless it's significant
1538     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1539 
1540     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1541                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1542                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1543 




1544     expand(expand_bytes);
1545 
1546     // No expansion needed; now see if we want to shrink
1547   } else if (capacity_after_gc > maximum_desired_capacity) {
1548     // Capacity too large, compute shrinking size
1549     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1550 
1551     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1552                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1553                               capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1554 




1555     shrink(shrink_bytes);
1556   }
1557 }
1558 
1559 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1560                                                             AllocationContext_t context,
1561                                                             bool do_gc,
1562                                                             bool clear_all_soft_refs,
1563                                                             bool expect_null_mutator_alloc_region,
1564                                                             bool* gc_succeeded) {
1565   *gc_succeeded = true;
1566   // Let's attempt the allocation first.
1567   HeapWord* result =
1568     attempt_allocation_at_safepoint(word_size,
1569                                     context,
1570                                     expect_null_mutator_alloc_region);
1571   if (result != NULL) {
1572     assert(*gc_succeeded, "sanity");
1573     return result;
1574   }


1640 
1641   // What else?  We might try synchronous finalization later.  If the total
1642   // space available is large enough for the allocation, then a more
1643   // complete compaction phase than we've tried so far might be
1644   // appropriate.
1645   assert(*succeeded, "sanity");
1646   return NULL;
1647 }
1648 
1649 // Attempt to expand the heap sufficiently
1650 // to support an allocation of the given "word_size".  If
1651 // successful, perform the allocation and return the address of the
1652 // allocated block, or else return "NULL".
1653 
1654 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1655   assert_at_safepoint(true /* should_be_vm_thread */);
1656 
1657   verify_region_sets_optional();
1658 
1659   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1660   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",



1661                             word_size * HeapWordSize);
1662 
1663 
1664   if (expand(expand_bytes)) {
1665     _hrm.verify_optional();
1666     verify_region_sets_optional();
1667     return attempt_allocation_at_safepoint(word_size,
1668                                            context,
1669                                            false /* expect_null_mutator_alloc_region */);
1670   }
1671   return NULL;
1672 }
1673 
1674 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1675   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1676   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1677                                        HeapRegion::GrainBytes);
1678 
1679   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B aligned expansion amount: " SIZE_FORMAT "B",


1680                             expand_bytes, aligned_expand_bytes);
1681 
1682   if (is_maximal_no_gc()) {
1683     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");


1684     return false;
1685   }
1686 
1687   double expand_heap_start_time_sec = os::elapsedTime();
1688   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1689   assert(regions_to_expand > 0, "Must expand by at least one region");
1690 
1691   uint expanded_by = _hrm.expand_by(regions_to_expand);
1692   if (expand_time_ms != NULL) {
1693     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1694   }
1695 
1696   if (expanded_by > 0) {
1697     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1698     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1699     g1_policy()->record_new_heap_size(num_regions());
1700   } else {
1701     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1702 

1703     // The expansion of the virtual storage space was unsuccessful.
1704     // Let's see if it was because we ran out of swap.
1705     if (G1ExitOnExpansionFailure &&
1706         _hrm.available() >= regions_to_expand) {
1707       // We had head room...
1708       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1709     }
1710   }
1711   return regions_to_expand > 0;
1712 }
1713 
1714 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1715   size_t aligned_shrink_bytes =
1716     ReservedSpace::page_align_size_down(shrink_bytes);
1717   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1718                                          HeapRegion::GrainBytes);
1719   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1720 
1721   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1722   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1723 
1724 
1725   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",



1726                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1727   if (num_regions_removed > 0) {
1728     g1_policy()->record_new_heap_size(num_regions());
1729   } else {
1730     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");


1731   }
1732 }
1733 
1734 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1735   verify_region_sets_optional();
1736 
1737   // We should only reach here at the end of a Full GC, which means we
1738   // should not be holding on to any GC alloc regions. The method
1739   // below will make sure of that and do any remaining clean up.
1740   _allocator->abandon_gc_alloc_regions();
1741 
1742   // Instead of tearing down / rebuilding the free lists here, we
1743   // could instead use the remove_all_pending() method on free_list to
1744   // remove only the ones that we need to remove.
1745   tear_down_region_sets(true /* free_list_only */);
1746   shrink_helper(shrink_bytes);
1747   rebuild_region_sets(true /* free_list_only */);
1748 
1749   _hrm.verify_optional();
1750   verify_region_sets_optional();


1822   // Initialize the G1EvacuationFailureALot counters and flags.
1823   NOT_PRODUCT(reset_evacuation_should_fail();)
1824 
1825   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1826 }
1827 
1828 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1829                                                                  size_t size,
1830                                                                  size_t translation_factor) {
1831   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1832   // Allocate a new reserved space, preferring to use large pages.
1833   ReservedSpace rs(size, preferred_page_size);
1834   G1RegionToSpaceMapper* result  =
1835     G1RegionToSpaceMapper::create_mapper(rs,
1836                                          size,
1837                                          rs.alignment(),
1838                                          HeapRegion::GrainBytes,
1839                                          translation_factor,
1840                                          mtGC);
1841   if (TracePageSizes) {
1842     tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1843                   description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1844   }
1845   return result;
1846 }
1847 
1848 jint G1CollectedHeap::initialize() {
1849   CollectedHeap::pre_initialize();
1850   os::enable_vtime();
1851 


1852   // Necessary to satisfy locking discipline assertions.
1853 
1854   MutexLocker x(Heap_lock);
1855 




1856   // While the GC code places no constraints on the value of HeapWordSize,
1857   // there are multiple other areas in the system which assume that
1858   // HeapWordSize equals wordSize (e.g. oop->object_size in some
1859   // cases incorrectly returns the size in wordSize units rather than
1860   // HeapWordSize).
1861   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1862 
1863   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1864   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1865   size_t heap_alignment = collector_policy()->heap_alignment();
1866 
1867   // Ensure that the sizes are properly aligned.
1868   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1869   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1870   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1871 
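       // The closure applied to dirty card table entries by the concurrent
       // refinement machinery created below.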
1872   _refine_cte_cl = new RefineCardTableEntryClosure();
1873 
1874   jint ecode = JNI_OK;
1875   _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);


2028   G1AllocRegion::setup(this, dummy_region);
2029 
2030   _allocator->init_mutator_alloc_region();
2031 
2032   // Do create of the monitoring and management support so that
2033   // values in the heap have been properly initialized.
2034   _g1mm = new G1MonitoringSupport(this);
2035 
2036   G1StringDedup::initialize();
2037 
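       // Per-worker stacks used to preserve the mark words of objects that fail
       // evacuation, so they can be restored after the pause.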
2038   _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2039   for (uint i = 0; i < ParallelGCThreads; i++) {
2040     new (&_preserved_objs[i]) OopAndMarkOopStack();
2041   }
2042 
2043   return JNI_OK;
2044 }
2045 
2046 void G1CollectedHeap::stop() {
2047   // Stop all concurrent threads. We do this to make sure these threads
2048   // do not continue to execute and access resources (e.g. logging)
2049   // that are destroyed during shutdown.
2050   _cg1r->stop();
2051   _cmThread->stop();
2052   if (G1StringDedup::is_enabled()) {
2053     G1StringDedup::stop();
2054   }
2055 }
2056 
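     // The heap is aligned to the region size, which never exceeds the
     // maximum region size, so that is a conservative bound on the alignment.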
2057 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2058   return HeapRegion::max_region_size();
2059 }
2060 
2061 void G1CollectedHeap::post_initialize() {
2062   CollectedHeap::post_initialize();
2063   ref_processing_init();
2064 }
2065 
2066 void G1CollectedHeap::ref_processing_init() {
2067   // Reference processing in G1 currently works as follows:
2068   //


2145 }
2146 
2147 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2148   hr->reset_gc_time_stamp();
2149 }
2150 
2151 #ifndef PRODUCT
2152 
2153 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2154 private:
2155   unsigned _gc_time_stamp;
2156   bool _failures;
2157 
2158 public:
2159   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2160     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2161 
2162   virtual bool doHeapRegion(HeapRegion* hr) {
2163     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2164     if (_gc_time_stamp != region_gc_time_stamp) {
2165       log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),

2166                            region_gc_time_stamp, _gc_time_stamp);
2167       _failures = true;
2168     }
2169     return false;
2170   }
2171 
2172   bool failures() { return _failures; }
2173 };
2174 
2175 void G1CollectedHeap::check_gc_time_stamps() {
2176   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2177   heap_region_iterate(&cl);
2178   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2179 }
2180 #endif // PRODUCT
2181 
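     // Apply the given card closure to all cards currently buffered in the hot card cache.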
2182 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2183   _cg1r->hot_card_cache()->drain(cl, worker_i);
2184 }
2185 


2733 private:
2734   G1CollectedHeap* _g1h;
2735   VerifyOption     _vo;
2736   bool             _failures;
2737 public:
2738   // _vo == UsePrevMarking -> use "prev" marking information,
2739   // _vo == UseNextMarking -> use "next" marking information,
2740   // _vo == UseMarkWord    -> use mark word from object header.
2741   VerifyRootsClosure(VerifyOption vo) :
2742     _g1h(G1CollectedHeap::heap()),
2743     _vo(vo),
2744     _failures(false) { }
2745 
2746   bool failures() { return _failures; }
2747 
2748   template <class T> void do_oop_nv(T* p) {
2749     T heap_oop = oopDesc::load_heap_oop(p);
2750     if (!oopDesc::is_null(heap_oop)) {
2751       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2752       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2753         LogHandle(gc, verify) log;
2754         log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2755         if (_vo == VerifyOption_G1UseMarkWord) {
2756           log.info("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
2757         }
2758         ResourceMark rm;
2759         obj->print_on(log.info_stream());
2760         _failures = true;
2761       }
2762     }
2763   }
2764 
2765   void do_oop(oop* p)       { do_oop_nv(p); }
2766   void do_oop(narrowOop* p) { do_oop_nv(p); }
2767 };
2768 
2769 class G1VerifyCodeRootOopClosure: public OopClosure {
2770   G1CollectedHeap* _g1h;
2771   OopClosure* _root_cl;
2772   nmethod* _nm;
2773   VerifyOption _vo;
2774   bool _failures;
2775 
2776   template <class T> void do_oop_work(T* p) {
2777     // First verify that this root is live
2778     _root_cl->do_oop(p);
2779 


2784 
2785     // Don't check the code roots during marking verification in a full GC
2786     if (_vo == VerifyOption_G1UseMarkWord) {
2787       return;
2788     }
2789 
2790     // Now verify that the current nmethod (which contains p) is
2791     // in the code root list of the heap region containing the
2792     // object referenced by p.
2793 
2794     T heap_oop = oopDesc::load_heap_oop(p);
2795     if (!oopDesc::is_null(heap_oop)) {
2796       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2797 
2798       // Now fetch the region containing the object
2799       HeapRegion* hr = _g1h->heap_region_containing(obj);
2800       HeapRegionRemSet* hrrs = hr->rem_set();
2801       // Verify that the strong code root list for this region
2802       // contains the nmethod
2803       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2804         log_info(gc, verify)("Code root location " PTR_FORMAT " "
2805                              "from nmethod " PTR_FORMAT " not in strong "
2806                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2807                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2808         _failures = true;
2809       }
2810     }
2811   }
2812 
2813 public:
2814   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2815     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2816 
2817   void do_oop(oop* p) { do_oop_work(p); }
2818   void do_oop(narrowOop* p) { do_oop_work(p); }
2819 
2820   void set_nmethod(nmethod* nm) { _nm = nm; }
2821   bool failures() { return _failures; }
2822 };
2823 
2824 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {


2965   }
2966 
2967   bool doHeapRegion(HeapRegion* r) {
2968     // For archive regions, verify there are no heap pointers to
2969     // non-pinned regions. For all others, verify liveness info.
2970     if (r->is_archive()) {
2971       VerifyArchiveRegionClosure verify_oop_pointers(r);
2972       r->object_iterate(&verify_oop_pointers);
2973       return true;
2974     }
2975     if (!r->is_continues_humongous()) {
2976       bool failures = false;
2977       r->verify(_vo, &failures);
2978       if (failures) {
2979         _failures = true;
2980       } else if (!r->is_starts_humongous()) {
2981         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2982         r->object_iterate(&not_dead_yet_cl);
2983         if (_vo != VerifyOption_G1UseNextMarking) {
2984           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2985             log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
2986                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());




2987             _failures = true;
2988           }
2989         } else {
2990           // When vo == UseNextMarking we cannot currently do a sanity
2991           // check on the live bytes as the calculation has not been
2992           // finalized yet.
2993         }
2994       }
2995     }
2996     return false; // stop the region iteration if we hit a failure
2997   }
2998 };
2999 
3000 // This is the task used for parallel verification of the heap regions
3001 
3002 class G1ParVerifyTask: public AbstractGangTask {
3003 private:
3004   G1CollectedHeap*  _g1h;
3005   VerifyOption      _vo;
3006   bool              _failures;


3014       AbstractGangTask("Parallel verify task"),
3015       _g1h(g1h),
3016       _vo(vo),
3017       _failures(false),
3018       _hrclaimer(g1h->workers()->active_workers()) {}
3019 
3020   bool failures() {
3021     return _failures;
3022   }
3023 
3024   void work(uint worker_id) {
3025     HandleMark hm;
3026     VerifyRegionClosure blk(true, _vo);
3027     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3028     if (blk.failures()) {
3029       _failures = true;
3030     }
3031   }
3032 };
3033 
3034 void G1CollectedHeap::verify(VerifyOption vo) {
3035   if (!SafepointSynchronize::is_at_safepoint()) {
3036     log_info(gc, verify)("Skipping verification. Not at safepoint.");
3037   }
3038 
3039   assert(Thread::current()->is_VM_thread(),
3040          "Expected to be executed serially by the VM thread at this point");
3041 
3042   log_debug(gc, verify)("Roots");
3043   VerifyRootsClosure rootsCl(vo);
3044   VerifyKlassClosure klassCl(this, &rootsCl);
3045   CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3046 
3047   // We apply the relevant closures to all the oops in the
3048   // system dictionary, class loader data graph, the string table
3049   // and the nmethods in the code cache.
3050   G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3051   G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3052 
3053   {
3054     G1RootProcessor root_processor(this, 1);
3055     root_processor.process_all_roots(&rootsCl,
3056                                      &cldCl,
3057                                      &blobsCl);
3058   }
3059 
3060   bool failures = rootsCl.failures() || codeRootsCl.failures();
3061 
3062   if (vo != VerifyOption_G1UseMarkWord) {
3063     // If we're verifying during a full GC then the region sets
3064     // will have been torn down at the start of the GC. Therefore
3065     // verifying the region sets will fail. So we only verify
3066     // the region sets when not in a full GC.
3067     log_debug(gc, verify)("HeapRegionSets");
3068     verify_region_sets();
3069   }
3070 
3071   log_debug(gc, verify)("HeapRegions");
3072   if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3073 
3074     G1ParVerifyTask task(this, vo);
3075     workers()->run_task(&task);
3076     if (task.failures()) {
3077       failures = true;
3078     }
3079 
3080   } else {
3081     VerifyRegionClosure blk(false, vo);
3082     heap_region_iterate(&blk);
3083     if (blk.failures()) {
3084       failures = true;
3085     }
3086   }
3087 
3088   if (G1StringDedup::is_enabled()) {
3089     log_debug(gc, verify)("StrDedup");
3090     G1StringDedup::verify();
3091   }
3092 
3093   if (failures) {
3094     log_info(gc, verify)("Heap after failed verification:");
3095     // It helps to have the per-region information in the output to
3096     // help us track down what went wrong. This is why we call
3097     // print_extended_on() instead of print_on().
3098     LogHandle(gc, verify) log;
3099     ResourceMark rm;
3100     print_extended_on(log.info_stream());
3101   }
3102   guarantee(!failures, "there should not have been any failures");













3103 }
3104 
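     // Run heap verification if 'guard' is set and enough collections have occurred
     // (VerifyGCStartAt); returns the time spent verifying, in milliseconds.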
3105 double G1CollectedHeap::verify(bool guard, const char* msg) {
3106   double verify_time_ms = 0.0;
3107 
3108   if (guard && total_collections() >= VerifyGCStartAt) {
3109     double verify_start = os::elapsedTime();
3110     HandleMark hm;  // Discard invalid handles created during verification
3111     prepare_for_verify();
3112     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3113     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3114   }
3115 
3116   return verify_time_ms;
3117 }
3118 
3119 void G1CollectedHeap::verify_before_gc() {
3120   double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
3121   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3122 }
3123 
3124 void G1CollectedHeap::verify_after_gc() {
3125   double verify_time_ms = verify(VerifyAfterGC, "After GC");
3126   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3127 }
3128 
3129 class PrintRegionClosure: public HeapRegionClosure {
3130   outputStream* _st;
3131 public:
3132   PrintRegionClosure(outputStream* st) : _st(st) {}
3133   bool doHeapRegion(HeapRegion* r) {
3134     r->print_on(_st);
3135     return false;
3136   }
3137 };
3138 
3139 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3140                                        const HeapRegion* hr,
3141                                        const VerifyOption vo) const {
3142   switch (vo) {
3143   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3144   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3145   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();


3215     G1StringDedup::print_worker_threads_on(st);
3216   }
3217 }
3218 
3219 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3220   workers()->threads_do(tc);
3221   tc->do_thread(_cmThread);
3222   _cg1r->threads_do(tc);
3223   if (G1StringDedup::is_enabled()) {
3224     G1StringDedup::threads_do(tc);
3225   }
3226 }
3227 
3228 void G1CollectedHeap::print_tracing_info() const {
3229   // We'll overload this to mean "trace GC pause statistics."
3230   if (TraceYoungGenTime || TraceOldGenTime) {
3231     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3232     // to that.
3233     g1_policy()->print_tracing_info();
3234   }

3235   g1_rem_set()->print_summary_info();


3236   concurrent_mark()->print_summary_info();

3237   g1_policy()->print_yg_surv_rate_info();
3238 }
3239 
3240 #ifndef PRODUCT
3241 // Helpful for debugging RSet issues.
3242 
3243 class PrintRSetsClosure : public HeapRegionClosure {
3244 private:
3245   const char* _msg;
3246   size_t _occupied_sum;
3247 
3248 public:
3249   bool doHeapRegion(HeapRegion* r) {
3250     HeapRegionRemSet* hrrs = r->rem_set();
3251     size_t occupied = hrrs->occupied();
3252     _occupied_sum += occupied;
3253 
3254     tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));

3255     if (occupied == 0) {
3256       tty->print_cr("  RSet is empty");
3257     } else {
3258       hrrs->print();
3259     }
3260     tty->print_cr("----------");
3261     return false;
3262   }
3263 
3264   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3265     tty->cr();
3266     tty->print_cr("========================================");
3267     tty->print_cr("%s", msg);
3268     tty->cr();
3269   }
3270 
3271   ~PrintRSetsClosure() {
3272     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3273     tty->print_cr("========================================");
3274     tty->cr();
3275   }
3276 };
3277 
3278 void G1CollectedHeap::print_cset_rsets() {
3279   PrintRSetsClosure cl("Printing CSet RSets");
3280   collection_set_iterate(&cl);
3281 }
3282 
3283 void G1CollectedHeap::print_all_rsets() {
3284   PrintRSetsClosure cl("Printing All RSets");
3285   heap_region_iterate(&cl);
3286 }
3287 #endif // PRODUCT
3288 
3289 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3290   YoungList* young_list = heap()->young_list();
3291 
3292   size_t eden_used_bytes = young_list->eden_used_bytes();
3293   size_t survivor_used_bytes = young_list->survivor_used_bytes();
3294 


3312 
3313   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3314   gc_tracer->report_metaspace_summary(when, metaspace_summary);
3315 }
3316 
3317 
3318 G1CollectedHeap* G1CollectedHeap::heap() {
3319   CollectedHeap* heap = Universe::heap();
3320   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3321   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3322   return (G1CollectedHeap*)heap;
3323 }
3324 
3325 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3326   // always_do_update_barrier = false;
3327   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3328   // Fill TLAB's and such
3329   accumulate_statistics_all_tlabs();
3330   ensure_parsability(true);
3331 
3332   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());



3333 }
3334 
3335 void G1CollectedHeap::gc_epilogue(bool full) {



3336   // We are at the end of the GC. The total collections counter has already been incremented.
3337   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);


3338 
3339   // FIXME: what is this about?
3340   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3341   // is set.
3342 #if defined(COMPILER2) || INCLUDE_JVMCI
3343   assert(DerivedPointerTable::is_empty(), "derived pointer present");
3344 #endif
3345   // always_do_update_barrier = true;
3346 
3347   resize_all_tlabs();
3348   allocation_context_stats().update(full);
3349 
3350   // We have just completed a GC. Update the soft reference
3351   // policy with the new heap occupancy
3352   Universe::update_heap_info_at_gc();
3353 }
3354 
3355 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3356                                                uint gc_count_before,
3357                                                bool* succeeded,


3563     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3564 
3565     // Here's a good place to add any other checks we'd like to
3566     // perform on CSet regions.
3567     return false;
3568   }
3569 };
3570 #endif // ASSERT
3571 
3572 uint G1CollectedHeap::num_task_queues() const {
3573   return _task_queues->size();
3574 }
3575 
3576 #if TASKQUEUE_STATS
3577 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3578   st->print_raw_cr("GC Task Stats");
3579   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3580   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3581 }
3582 
3583 void G1CollectedHeap::print_taskqueue_stats() const {
3584   if (!develop_log_is_enabled(Trace, gc, task, stats)) {
3585     return;
3586   }
3587   LogHandle(gc, task, stats) log;
3588   ResourceMark rm;
3589   outputStream* st = log.trace_stream();
3590 
3591   print_taskqueue_stats_hdr(st);
3592 
3593   TaskQueueStats totals;
3594   const uint n = num_task_queues();
3595   for (uint i = 0; i < n; ++i) {
3596     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3597     totals += task_queue(i)->stats;
3598   }
3599   st->print_raw("tot "); totals.print(st); st->cr();
3600 
3601   DEBUG_ONLY(totals.verify());
3602 }
3603 
3604 void G1CollectedHeap::reset_taskqueue_stats() {
3605   const uint n = num_task_queues();
3606   for (uint i = 0; i < n; ++i) {
3607     task_queue(i)->stats.reset();
3608   }
3609 }
3610 #endif // TASKQUEUE_STATS
3611 
3612 void G1CollectedHeap::log_gc_footer(double pause_time_counter) {



















3613   if (evacuation_failed()) {
3614     log_info(gc)("To-space exhausted");
3615   }
3616 
3617   double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
3618   g1_policy()->print_phases(pause_time_sec);
3619 
3620   g1_policy()->print_detailed_heap_transition();








3621 }
3622 
3623 
3624 void G1CollectedHeap::wait_for_root_region_scanning() {
3625   double scan_wait_start = os::elapsedTime();
3626   // We have to wait until the CM threads finish scanning the
3627   // root regions as it's the only way to ensure that all the
3628   // objects on them have been correctly scanned before we start
3629   // moving them during the GC.
3630   bool waited = _cm->root_regions()->wait_until_scan_finished();
3631   double wait_time_ms = 0.0;
3632   if (waited) {
3633     double scan_wait_end = os::elapsedTime();
3634     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3635   }
3636   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3637 }
3638 
3639 bool
3640 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3641   assert_at_safepoint(true /* should_be_vm_thread */);
3642   guarantee(!is_gc_active(), "collection is not reentrant");
3643 
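       // If the GC locker is active, skip this pause; a GC will be requested again
       // once the last thread leaves its JNI critical region.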
3644   if (GC_locker::check_active_before_gc()) {
3645     return false;
3646   }
3647 
3648   _gc_timer_stw->register_gc_start();
3649 
3650   GCIdMark gc_id_mark;
3651   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3652 
3653   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3654   ResourceMark rm;
3655 
3656   wait_for_root_region_scanning();
3657 

3658   print_heap_before_gc();
3659   trace_heap_before_gc(_gc_tracer_stw);
3660 
3661   verify_region_sets_optional();
3662   verify_dirty_young_regions();
3663 
3664   // This call will decide whether this pause is an initial-mark
3665   // pause. If it is, during_initial_mark_pause() will return true
3666   // for the duration of this pause.
3667   g1_policy()->decide_on_conc_mark_initiation();
3668 
3669   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3670   assert(!collector_state()->during_initial_mark_pause() ||
3671           collector_state()->gcs_are_young(), "sanity");
3672 
3673   // We also do not allow mixed GCs during marking.
3674   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3675 
3676   // Record whether this pause is an initial mark. By the time the current
3677   // thread has completed its logging output and it's safe to signal
3678   // the CM thread, the flag's value in the policy will have been reset.
3679   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3680 
3681   // Inner scope for scope based logging, timers, and stats collection
3682   {
3683     EvacuationInfo evacuation_info;
3684 
3685     if (collector_state()->during_initial_mark_pause()) {
3686       // We are about to start a marking cycle, so we increment the
3687       // full collection counter.
3688       increment_old_marking_cycles_started();
3689       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3690     }
3691 
3692     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3693 
3694     GCTraceCPUTime tcpu;
3695 
3696     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3697                                                                   workers()->active_workers(),
3698                                                                   Threads::number_of_non_daemon_threads());
3699     workers()->set_active_workers(active_workers);
3700     FormatBuffer<> gc_string("Pause ");
3701     if (collector_state()->during_initial_mark_pause()) {
3702       gc_string.append("Initial Mark");
3703     } else if (collector_state()->gcs_are_young()) {
3704       gc_string.append("Young");
3705     } else {
3706       gc_string.append("Mixed");
3707     }
3708     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3709 
3710     double pause_start_sec = os::elapsedTime();
3711     double pause_start_counter = os::elapsed_counter();
3712     g1_policy()->note_gc_start(active_workers);

3713 
3714     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3715     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3716 
3717     // If the secondary_free_list is not empty, append it to the
3718     // free_list. No need to wait for the cleanup operation to finish;
3719     // the region allocation code will check the secondary_free_list
3720     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3721     // set, skip this step so that the region allocation code has to
3722     // get entries from the secondary_free_list.
3723     if (!G1StressConcRegionFreeing) {
3724       append_secondary_free_list_if_not_empty_with_lock();
3725     }
3726 
3727     assert(check_young_list_well_formed(), "young list should be well formed");
3728 
3729     // Don't dynamically change the number of GC threads this early.  A value of
3730     // 0 is used to indicate serial work.  When parallel work is done,
3731     // it will be set.
3732 


3750       // reference processing currently works in G1.
3751 
3752       // Enable discovery in the STW reference processor
3753       if (g1_policy()->should_process_references()) {
3754         ref_processor_stw()->enable_discovery();
3755       } else {
3756         ref_processor_stw()->disable_discovery();
3757       }
3758 
3759       {
3760         // We want to temporarily turn off discovery by the
3761         // CM ref processor, if necessary, and turn it back
3762         // on again later if we do turn it off. Using a scoped
3763         // NoRefDiscovery object will do this.
3764         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3765 
3766         // Forget the current alloc region (we might even choose it to be part
3767         // of the collection set!).
3768         _allocator->release_mutator_alloc_region();
3769 





3770         // This timing is only used by the ergonomics to handle our pause target.
3771         // It is unclear why this should not include the full pause. We will
3772         // investigate this in CR 7178365.
3773         //
3774         // Preserving the old comment here in case it helps the investigation:
3775         //
3776         // The elapsed time induced by the start time below deliberately elides
3777         // the possible verification above.
3778         double sample_start_time_sec = os::elapsedTime();
3779 
3780         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3781 
3782         if (collector_state()->during_initial_mark_pause()) {
3783           concurrent_mark()->checkpointRootsInitialPre();
3784         }
3785 
3786         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3787         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3788 
3789         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());


3873 
3874         if (collector_state()->during_initial_mark_pause()) {
3875           // We have to do this before we notify the CM threads that
3876           // they can start working to make sure that all the
3877           // appropriate initialization is done on the CM object.
3878           concurrent_mark()->checkpointRootsInitialPost();
3879           collector_state()->set_mark_in_progress(true);
3880           // Note that we don't actually trigger the CM thread at
3881           // this point. We do that later when we're sure that
3882           // the current thread has completed its logging output.
3883         }
3884 
3885         allocate_dummy_regions();
3886 
3887         _allocator->init_mutator_alloc_region();
3888 
3889         {
3890           size_t expand_bytes = g1_policy()->expansion_amount();
3891           if (expand_bytes > 0) {
3892             size_t bytes_before = capacity();
3893             // No need for ergo logging here;
3894             // expansion_amount() does this when it returns a value > 0.
3895             double expand_ms;
3896             if (!expand(expand_bytes, &expand_ms)) {
3897               // We failed to expand the heap. Cannot do anything about it.
3898             }
3899             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3900           }
3901         }
3902 
3903         // We redo the verification, but now with respect to the new CSet
3904         // which has just been initialized after the previous CSet was freed.
3905         _cm->verify_no_cset_oops();
3906         _cm->note_end_of_gc();
3907 
3908         // This timing is only used by the ergonomics to handle our pause target.
3909         // It is unclear why this should not include the full pause. We will
3910         // investigate this in CR 7178365.
3911         double sample_end_time_sec = os::elapsedTime();
3912         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3913         size_t total_cards_scanned = per_thread_states.total_cards_scanned();


3933         // stamp here we invalidate all the GC time stamps on all the
3934         // regions and saved_mark_word() will simply return top() for
3935         // all the regions. This is a nicer way of ensuring this rather
3936         // than iterating over the regions and fixing them. In fact, the
3937         // GC time stamp increment here also ensures that
3938         // saved_mark_word() will return top() between pauses, i.e.,
3939         // during concurrent refinement. So we don't need the
3940         // is_gc_active() check to decide which top to use when
3941         // scanning cards (see CR 7039627).
3942         increment_gc_time_stamp();
3943 
3944         verify_after_gc();
3945         check_bitmaps("GC End");
3946 
3947         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3948         ref_processor_stw()->verify_no_references_recorded();
3949 
3950         // CM reference discovery will be re-enabled if necessary.
3951       }
3952 






3953 #ifdef TRACESPINNING
3954       ParallelTaskTerminator::print_termination_counts();
3955 #endif
3956 
3957       gc_epilogue(false);
3958     }
3959 
3960     // Print the remainder of the GC log output.
3961     log_gc_footer(os::elapsed_counter() - pause_start_counter);
3962 
3963     // It is not yet safe to tell the concurrent mark to
3964     // start as we have some optional output below. We don't want the
3965     // output from the concurrent mark thread interfering with this
3966     // logging output either.
3967 
3968     _hrm.verify_optional();
3969     verify_region_sets_optional();
3970 
3971     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3972     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3973 
3974     print_heap_after_gc();
3975     trace_heap_after_gc(_gc_tracer_stw);
3976 
3977     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3978     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3979     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3980     // before any GC notifications are raised.
3981     g1mm()->update_sizes();
3982 
3983     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3984     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3985     _gc_timer_stw->register_gc_end();
3986     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3987   }
3988   // It should now be safe to tell the concurrent mark thread to start
3989   // without its logging output interfering with the logging output
3990   // that came from the pause.
3991 


4106 
4107       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4108 
4109       double term_sec = 0.0;
4110       size_t evac_term_attempts = 0;
4111       {
4112         double start = os::elapsedTime();
4113         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4114         evac.do_void();
4115 
4116         evac_term_attempts = evac.term_attempts();
4117         term_sec = evac.term_time();
4118         double elapsed_sec = os::elapsedTime() - start;
4119         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4120         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4121         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4122       }
4123 
4124       assert(pss->queue_is_empty(), "should be empty");
4125 
4126       if (log_is_enabled(Debug, gc, task, stats)) {
4127         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4128         size_t lab_waste;
4129         size_t lab_undo_waste;
4130         pss->waste(lab_waste, lab_undo_waste);
4131         _g1h->print_termination_stats(worker_id,

4132                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
4133                                       strong_roots_sec * 1000.0,                  /* strong roots time */
4134                                       term_sec * 1000.0,                          /* evac term time */
4135                                       evac_term_attempts,                         /* evac term attempts */
4136                                       lab_waste,                                  /* alloc buffer waste */
4137                                       lab_undo_waste                              /* undo waste */
4138                                       );
4139       }
4140 
4141       // Close the inner scope so that the ResourceMark and HandleMark
4142       // destructors are executed here and are included as part of the
4143       // "GC Worker Time".
4144     }
4145     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4146   }
4147 };
4148 
4149 void G1CollectedHeap::print_termination_stats_hdr() {
4150   log_debug(gc, task, stats)("GC Termination Stats");
4151   log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
4152   log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
4153   log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4154 }
4155 
4156 void G1CollectedHeap::print_termination_stats(uint worker_id,

4157                                               double elapsed_ms,
4158                                               double strong_roots_ms,
4159                                               double term_ms,
4160                                               size_t term_attempts,
4161                                               size_t alloc_buffer_waste,
4162                                               size_t undo_waste) const {
4163   log_debug(gc, task, stats)
4164               ("%3d %9.2f %9.2f %6.2f "
4165                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4166                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4167                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4168                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4169                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4170                alloc_buffer_waste * HeapWordSize / K,
4171                undo_waste * HeapWordSize / K);
4172 }
4173 
4174 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4175 private:
4176   BoolObjectClosure* _is_alive;
4177   int _initial_string_table_size;
4178   int _initial_symbol_table_size;
4179 
4180   bool  _process_strings;
4181   int _strings_processed;
4182   int _strings_removed;
4183 
4184   bool  _process_symbols;


4193     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4194 
4195     _initial_string_table_size = StringTable::the_table()->table_size();
4196     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4197     if (process_strings) {
4198       StringTable::clear_parallel_claimed_index();
4199     }
4200     if (process_symbols) {
4201       SymbolTable::clear_parallel_claimed_index();
4202     }
4203   }
4204 
4205   ~G1StringSymbolTableUnlinkTask() {
4206     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4207               "claim value %d after unlink less than initial string table size %d",
4208               StringTable::parallel_claimed_index(), _initial_string_table_size);
4209     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4210               "claim value %d after unlink less than initial symbol table size %d",
4211               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4212 
4213     log_debug(gc, stringdedup)("Cleaned string and symbol table, "

4214                                "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4215                                "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4216                                strings_processed(), strings_removed(),
4217                                symbols_processed(), symbols_removed());
4218   }

4219 
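       // Each worker claims chunks of the string and symbol tables in parallel
       // and unlinks entries that are no longer referenced.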
4220   void work(uint worker_id) {
4221     int strings_processed = 0;
4222     int strings_removed = 0;
4223     int symbols_processed = 0;
4224     int symbols_removed = 0;
4225     if (_process_strings) {
4226       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4227       Atomic::add(strings_processed, &_strings_processed);
4228       Atomic::add(strings_removed, &_strings_removed);
4229     }
4230     if (_process_symbols) {
4231       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4232       Atomic::add(symbols_processed, &_symbols_processed);
4233       Atomic::add(symbols_removed, &_symbols_removed);
4234     }
4235   }
4236 
4237   size_t strings_processed() const { return (size_t)_strings_processed; }
4238   size_t strings_removed()   const { return (size_t)_strings_removed; }


5037   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5038 }
5039 
5040 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5041   // Should G1EvacuationFailureALot be in effect for this GC?
5042   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5043 
5044   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5045   double start_par_time_sec = os::elapsedTime();
5046   double end_par_time_sec;
5047 
5048   {
5049     const uint n_workers = workers()->active_workers();
5050     G1RootProcessor root_processor(this, n_workers);
5051     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5052     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5053     if (collector_state()->during_initial_mark_pause()) {
5054       ClassLoaderDataGraph::clear_claimed_marks();
5055     }
5056 
5057     print_termination_stats_hdr();



5058 
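         // The evacuation work itself (root processing, object copying and queue
         // draining) is performed by G1ParTask on the worker threads.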
5059     workers()->run_task(&g1_par_task);
5060     end_par_time_sec = os::elapsedTime();
5061 
5062     // Closing the inner scope will execute the destructor
5063     // for the G1RootProcessor object. We record the current
5064     // elapsed time before closing the scope so that time
5065     // taken for the destructor is NOT included in the
5066     // reported parallel time.
5067   }
5068 
5069   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5070 
5071   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5072   phase_times->record_par_time(par_time_ms);
5073 
5074   double code_root_fixup_time_ms =
5075         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5076   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5077 }


5276   }
5277 }
5278 
5279 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5280   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5281   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5282     verify_dirty_region(hr);
5283   }
5284 }
5285 
5286 void G1CollectedHeap::verify_dirty_young_regions() {
5287   verify_dirty_young_list(_young_list->first_region());
5288 }
5289 
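     // Check that the given bitmap has no bits set in [tams, end): objects above
     // TAMS are implicitly live and must not carry explicit marks.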
5290 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5291                                                HeapWord* tams, HeapWord* end) {
5292   guarantee(tams <= end,
5293             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5294   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5295   if (result < end) {
5296     log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
5297     log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));



5298     return false;
5299   }
5300   return true;
5301 }
5302 
5303 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5304   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5305   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5306 
5307   HeapWord* bottom = hr->bottom();
5308   HeapWord* ptams  = hr->prev_top_at_mark_start();
5309   HeapWord* ntams  = hr->next_top_at_mark_start();
5310   HeapWord* end    = hr->end();
5311 
5312   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5313 
5314   bool res_n = true;
5315   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5316   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5317   // if we happen to be in that state.
5318   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5319     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5320   }
5321   if (!res_p || !res_n) {
5322     log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
5323     log_info(gc, verify)("#### Caller: %s", caller);

5324     return false;
5325   }
5326   return true;
5327 }
5328 
5329 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5330   if (!G1VerifyBitmaps) return;
5331 
5332   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5333 }
5334 
5335 class G1VerifyBitmapClosure : public HeapRegionClosure {
5336 private:
5337   const char* _caller;
5338   G1CollectedHeap* _g1h;
5339   bool _failures;
5340 
5341 public:
5342   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5343     _caller(caller), _g1h(g1h), _failures(false) { }


5355 
5356 void G1CollectedHeap::check_bitmaps(const char* caller) {
5357   if (!G1VerifyBitmaps) return;
5358 
5359   G1VerifyBitmapClosure cl(caller, this);
5360   heap_region_iterate(&cl);
5361   guarantee(!cl.failures(), "bitmap verification");
5362 }
5363 
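     // Cross-checks, for every region, the InCSetState cached in the
     // _in_cset_fast_test table against the region's own state
     // (in_collection_set(), is_humongous(), is_young(), is_old()) and
     // records a failure on any mismatch.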
5364 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5365  private:
5366   bool _failures;
5367  public:
5368   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5369 
5370   virtual bool doHeapRegion(HeapRegion* hr) {
5371     uint i = hr->hrm_index();
5372     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5373     if (hr->is_humongous()) {
5374       if (hr->in_collection_set()) {
5375         log_info(gc, verify)("\n## humongous region %u in CSet", i);
5376         _failures = true;
5377         return true;
5378       }
5379       if (cset_state.is_in_cset()) {
5380         log_info(gc, verify)("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5381         _failures = true;
5382         return true;
5383       }
5384       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5385         log_info(gc, verify)("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5386         _failures = true;
5387         return true;
5388       }
5389     } else {
5390       if (cset_state.is_humongous()) {
5391         log_info(gc, verify)("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5392         _failures = true;
5393         return true;
5394       }
5395       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5396         log_info(gc, verify)("\n## in CSet %d / cset state %d inconsistency for region %u",
5397                              hr->in_collection_set(), cset_state.value(), i);
5398         _failures = true;
5399         return true;
5400       }
5401       if (cset_state.is_in_cset()) {
5402         if (hr->is_young() != (cset_state.is_young())) {
5403           log_info(gc, verify)("\n## is_young %d / cset state %d inconsistency for region %u",
5404                                hr->is_young(), cset_state.value(), i);
5405           _failures = true;
5406           return true;
5407         }
5408         if (hr->is_old() != (cset_state.is_old())) {
5409           log_info(gc, verify)("\n## is_old %d / cset state %d inconsistency for region %u",
5410                                hr->is_old(), cset_state.value(), i);
5411           _failures = true;
5412           return true;
5413         }
5414       }
5415     }
5416     return false;
5417   }
5418 
5419   bool failures() const { return _failures; }
5420 };
5421 
5422 bool G1CollectedHeap::check_cset_fast_test() {
5423   G1CheckCSetFastTableClosure cl;
5424   _hrm.iterate(&cl);
5425   return !cl.failures();
5426 }
5427 #endif // PRODUCT
5428 
5429 void G1CollectedHeap::cleanUpCardTable() {


5607     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5608     // until the end of a concurrent mark.
5609     //
5610     // It is not required to check whether the object has been found dead by marking
5611     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5612     // all objects allocated during that time are considered live.
5613     // SATB marking is even more conservative than the remembered set.
5614     // So if at this point in the collection there is no remembered set entry,
5615     // nobody has a reference to it.
5616     // At the start of collection we flush all refinement logs, and remembered sets
5617     // are completely up-to-date wrt references to the humongous object.
5618     //
5619     // Other implementation considerations:
5620     // - never consider object arrays at this time because they would require
5621     // considerable effort to clean up the remembered sets. This is
5622     // required because stale remembered sets might reference locations that
5623     // are currently allocated into.
5624     uint region_idx = r->hrm_index();
5625     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5626         !r->rem_set()->is_empty()) {
5627       log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",


5628                                region_idx,
5629                                (size_t)obj->size() * HeapWordSize,
5630                                p2i(r->bottom()),
5631                                r->rem_set()->occupied(),
5632                                r->rem_set()->strong_code_roots_list_length(),
5633                                next_bitmap->isMarked(r->bottom()),
5634                                g1h->is_humongous_reclaim_candidate(region_idx),
5635                                obj->is_typeArray()
5636                               );


5637       return false;
5638     }
5639 
5640     guarantee(obj->is_typeArray(),
5641               "Only eagerly reclaiming type arrays is supported, but the object "
5642               PTR_FORMAT " is not.", p2i(r->bottom()));
5643 
5644     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",

5645                              region_idx,
5646                              (size_t)obj->size() * HeapWordSize,
5647                              p2i(r->bottom()),
5648                              r->rem_set()->occupied(),
5649                              r->rem_set()->strong_code_roots_list_length(),
5650                              next_bitmap->isMarked(r->bottom()),
5651                              g1h->is_humongous_reclaim_candidate(region_idx),
5652                              obj->is_typeArray()
5653                             );
5654 
5655     // Need to clear the mark bit of the humongous object if it is already set.
5656     if (next_bitmap->isMarked(r->bottom())) {
5657       next_bitmap->clear(r->bottom());
5658     }
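         // Free the humongous object region by region, starting with the
         // "starts humongous" region and following its "continues humongous"
         // tail; each region's bytes are accounted and the region is put on
         // the supplied free region list.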
5659     do {
5660       HeapRegion* next = g1h->next_region_in_humongous(r);
5661       _freed_bytes += r->used();
5662       r->set_containing_set(NULL);
5663       _humongous_regions_removed.increment(1u, r->capacity());
5664       g1h->free_humongous_region(r, _free_region_list, false);
5665       r = next;
5666     } while (r != NULL);
5667 
5668     return false;
5669   }
5670 
5671   HeapRegionSetCount& humongous_free_count() {
5672     return _humongous_regions_removed;
5673   }
5674 
5675   size_t bytes_freed() const {
5676     return _freed_bytes;
5677   }
5678 
5679   size_t humongous_reclaimed() const {
5680     return _humongous_regions_removed.length();
5681   }
5682 };
5683 
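     // Walks all regions and eagerly frees humongous objects (currently
     // primitive type arrays only) that are reclaim candidates with empty
     // remembered sets, collecting the freed regions on a local cleanup list.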
5684 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5685   assert_at_safepoint(true);
5686 
5687   if (!G1EagerReclaimHumongousObjects ||
5688       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
5689     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5690     return;
5691   }
5692 
5693   double start_time = os::elapsedTime();
5694 
5695   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5696 
5697   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5698   heap_region_iterate(&cl);
5699 
5700   HeapRegionSetCount empty_set;
5701   remove_from_old_sets(empty_set, cl.humongous_free_count());
5702 
5703   G1HRPrinter* hrp = hr_printer();
5704   if (hrp->is_active()) {
5705     FreeRegionListIterator iter(&local_cleanup_list);
5706     while (iter.more_available()) {
5707       HeapRegion* hr = iter.get_next();
5708       hrp->cleanup(hr);


5721 // the current incremental collection set in preparation for a
5722 // full collection. After the full GC we will start to build up
5723 // the incremental collection set again.
5724 // This is only called when we're doing a full collection
5725 // and is immediately followed by tearing down the young list.
5726 
5727 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5728   HeapRegion* cur = cs_head;
5729 
5730   while (cur != NULL) {
5731     HeapRegion* next = cur->next_in_collection_set();
5732     assert(cur->in_collection_set(), "bad CS");
5733     cur->set_next_in_collection_set(NULL);
5734     clear_in_cset(cur);
5735     cur->set_young_index_in_cset(-1);
5736     cur = next;
5737   }
5738 }
5739 
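     // _free_regions_coming is a simple flag used together with
     // SecondaryFreeList_lock: the concurrent mark thread sets it while freed
     // regions are being handed over to the secondary free list and notifies
     // waiters when it resets it, so other threads can block in
     // wait_while_free_regions_coming() until the hand-over is complete.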
5740 void G1CollectedHeap::set_free_regions_coming() {
5741   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");



5742 
5743   assert(!free_regions_coming(), "pre-condition");
5744   _free_regions_coming = true;
5745 }
5746 
5747 void G1CollectedHeap::reset_free_regions_coming() {
5748   assert(free_regions_coming(), "pre-condition");
5749 
5750   {
5751     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5752     _free_regions_coming = false;
5753     SecondaryFreeList_lock->notify_all();
5754   }
5755 
5756   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");



5757 }
5758 
5759 void G1CollectedHeap::wait_while_free_regions_coming() {
5760   // Most of the time we won't have to wait, so let's do a quick test
5761   // before we take the lock.
5762   if (!free_regions_coming()) {
5763     return;
5764   }
5765 
5766   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");



5767 
5768   {
5769     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5770     while (free_regions_coming()) {
5771       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5772     }
5773   }
5774 
5775   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");



5776 }
5777 
5778 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5779   return _allocator->is_retained_old_region(hr);
5780 }
5781 
5782 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5783   _young_list->push_region(hr);
5784 }
5785 
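     // Closure used by check_young_list_empty() to flag any region that is
     // still tagged as young when none should remain.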
5786 class NoYoungRegionsClosure: public HeapRegionClosure {
5787 private:
5788   bool _success;
5789 public:
5790   NoYoungRegionsClosure() : _success(true) { }
5791   bool doHeapRegion(HeapRegion* r) {
5792     if (r->is_young()) {
5793       log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5794                            p2i(r->bottom()), p2i(r->end()));
5795       _success = false;
5796     }
5797     return false;
5798   }
5799   bool success() { return _success; }
5800 };
5801 
5802 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5803   bool ret = _young_list->check_list_empty(check_sample);
5804 
5805   if (check_heap) {
5806     NoYoungRegionsClosure closure;
5807     heap_region_iterate(&closure);
5808     ret = ret && closure.success();
5809   }
5810 
5811   return ret;
5812 }
5813 


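     // Retires a GC allocation region: records the bytes copied into it during
     // this GC and places the region either on the survivor list (young
     // destination) or in the old region set.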
6024 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6025                                              size_t allocated_bytes,
6026                                              InCSetState dest) {
6027   bool during_im = collector_state()->during_initial_mark_pause();
6028   alloc_region->note_end_of_copying(during_im);
6029   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6030   if (dest.is_young()) {
6031     young_list()->add_survivor_region(alloc_region);
6032   } else {
6033     _old_set.add(alloc_region);
6034   }
6035   _hr_printer.retire(alloc_region);
6036 }
6037 
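     // Claims the free region with the highest index, expanding the heap if the
     // region manager had to commit a new region to provide one; returns NULL
     // if no free region is available.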
6038 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6039   bool expanded = false;
6040   uint index = _hrm.find_highest_free(&expanded);
6041 
6042   if (index != G1_NO_HRM_INDEX) {
6043     if (expanded) {
6044       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",



6045                                 HeapRegion::GrainWords * HeapWordSize);
6046     }
6047     _hrm.allocate_free_regions_starting_at(index, 1);
6048     return region_at(index);
6049   }
6050   return NULL;
6051 }
6052 
6053 // Heap region set verification
6054 
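     // Tallies regions by type into _old_count, _humongous_count and
     // _free_count so that the totals can afterwards be checked against the
     // counts maintained by the corresponding sets and the region manager.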
6055 class VerifyRegionListsClosure : public HeapRegionClosure {
6056 private:
6057   HeapRegionSet*   _old_set;
6058   HeapRegionSet*   _humongous_set;
6059   HeapRegionManager*   _hrm;
6060 
6061 public:
6062   HeapRegionSetCount _old_count;
6063   HeapRegionSetCount _humongous_count;
6064   HeapRegionSetCount _free_count;


< prev index next >