src/share/vm/gc/g1/g1CollectedHeap.cpp

  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "gc/g1/bufferingOopClosure.hpp"
  31 #include "gc/g1/concurrentG1Refine.hpp"
  32 #include "gc/g1/concurrentG1RefineThread.hpp"
  33 #include "gc/g1/concurrentMarkThread.inline.hpp"
  34 #include "gc/g1/g1Allocator.inline.hpp"
  35 #include "gc/g1/g1CollectedHeap.inline.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ErgoVerbose.hpp"
  39 #include "gc/g1/g1EvacFailure.hpp"
  40 #include "gc/g1/g1GCPhaseTimes.hpp"
  41 #include "gc/g1/g1Log.hpp"
  42 #include "gc/g1/g1MarkSweep.hpp"
  43 #include "gc/g1/g1OopClosures.inline.hpp"
  44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  46 #include "gc/g1/g1RemSet.inline.hpp"
  47 #include "gc/g1/g1RootClosures.hpp"
  48 #include "gc/g1/g1RootProcessor.hpp"
  49 #include "gc/g1/g1StringDedup.hpp"
  50 #include "gc/g1/g1YCTypes.hpp"
  51 #include "gc/g1/heapRegion.inline.hpp"
  52 #include "gc/g1/heapRegionRemSet.hpp"
  53 #include "gc/g1/heapRegionSet.inline.hpp"
  54 #include "gc/g1/suspendibleThreadSet.hpp"
  55 #include "gc/g1/vm_operations_g1.hpp"
  56 #include "gc/shared/gcHeapSummary.hpp"
  57 #include "gc/shared/gcId.hpp"
  58 #include "gc/shared/gcLocker.inline.hpp"
  59 #include "gc/shared/gcTimer.hpp"
  60 #include "gc/shared/gcTrace.hpp"
  61 #include "gc/shared/gcTraceTime.hpp"
  62 #include "gc/shared/generationSpec.hpp"
  63 #include "gc/shared/isGCActiveMark.hpp"
  64 #include "gc/shared/referenceProcessor.hpp"
  65 #include "gc/shared/taskqueue.inline.hpp"
  66 #include "memory/allocation.hpp"
  67 #include "memory/iterator.hpp"
  68 #include "oops/oop.inline.hpp"
  69 #include "runtime/atomic.inline.hpp"
  70 #include "runtime/init.hpp"
  71 #include "runtime/orderAccess.inline.hpp"
  72 #include "runtime/vmThread.hpp"
  73 #include "utilities/globalDefinitions.hpp"
  74 #include "utilities/stack.inline.hpp"
  75 
  76 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  77 
  78 // INVARIANTS/NOTES
  79 //
  80 // All allocation activity covered by the G1CollectedHeap interface is
  81 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  82 // and allocate_new_tlab, which are the "entry" points to the
  83 // allocation code from the rest of the JVM.  (Note that this does not
  84 // apply to TLAB allocation, which is not part of this interface: it
  85 // is done by clients of this interface.)
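
To make the invariant above concrete, here is a minimal, hypothetical sketch of a slow-path allocation entry point serializing on the Heap_lock; only MutexLocker and Heap_lock are taken from the real code, the rest is illustrative.

// Hypothetical sketch (not part of this file): slow-path allocation under the Heap_lock.
static HeapWord* sketch_allocate_slow(G1CollectedHeap* g1h, size_t word_size) {
  // Taking the Heap_lock serializes region selection and heap-sizing decisions
  // across all mem_allocate / allocate_new_tlab callers.
  MutexLocker ml(Heap_lock);
  // ... try the current mutator alloc region, then the free list, then heap expansion ...
  return NULL; // placeholder; a real path returns the start of the allocated block
}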


 187   } while (hr != head);
 188   assert(hr != NULL, "invariant");
 189   hr->set_next_dirty_cards_region(NULL);
 190   return hr;
 191 }
 192 
 193 // Returns true if the reference points to an object that
 194 // can move in an incremental collection.
 195 bool G1CollectedHeap::is_scavengable(const void* p) {
 196   HeapRegion* hr = heap_region_containing(p);
 197   return !hr->is_pinned();
 198 }
 199 
 200 // Private methods.
 201 
 202 HeapRegion*
 203 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 204   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 205   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 206     if (!_secondary_free_list.is_empty()) {
 207       if (G1ConcRegionFreeingVerbose) {
 208         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 209                                "secondary_free_list has %u entries",
 210                                _secondary_free_list.length());
 211       }
 212       // It looks as if there are free regions available on the
 213       // secondary_free_list. Let's move them to the free_list and try
 214       // again to allocate from it.
 215       append_secondary_free_list();
 216 
 217       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 218              "empty we should have moved at least one entry to the free_list");
 219       HeapRegion* res = _hrm.allocate_free_region(is_old);
 220       if (G1ConcRegionFreeingVerbose) {
 221         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 222                                "allocated " HR_FORMAT " from secondary_free_list",
 223                                HR_FORMAT_PARAMS(res));
 224       }
 225       return res;
 226     }
 227 
 228     // Wait here until we get notified either when (a) there are no
 229     // more free regions coming or (b) some regions have been moved onto
 230     // the secondary_free_list.
 231     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 232   }
 233 
 234   if (G1ConcRegionFreeingVerbose) {
 235     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 236                            "could not allocate from secondary_free_list");
 237   }
 238   return NULL;
 239 }
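
For context, a hedged sketch of the producer side of the protocol above: concurrent cleanup appends freed regions to the secondary free list under the same lock and notifies any allocator waiting in new_region_try_secondary_free_list(). The helper name and the list-append step are illustrative.

// Illustrative producer side of the secondary free list protocol (sketch only).
static void sketch_publish_freed_regions(FreeRegionList* freed_by_cleanup,
                                         FreeRegionList* secondary_free_list) {
  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
  // ... move the regions from freed_by_cleanup onto secondary_free_list ...
  SecondaryFreeList_lock->notify_all(); // wakes the wait() in new_region_try_secondary_free_list()
}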
 240 
 241 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 242   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 243          "the only time we use this to allocate a humongous region is "
 244          "when we are allocating a single humongous region");
 245 
 246   HeapRegion* res;
 247   if (G1StressConcRegionFreeing) {
 248     if (!_secondary_free_list.is_empty()) {
 249       if (G1ConcRegionFreeingVerbose) {
 250         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 251                                "forced to look at the secondary_free_list");
 252       }
 253       res = new_region_try_secondary_free_list(is_old);
 254       if (res != NULL) {
 255         return res;
 256       }
 257     }
 258   }
 259 
 260   res = _hrm.allocate_free_region(is_old);
 261 
 262   if (res == NULL) {
 263     if (G1ConcRegionFreeingVerbose) {
 264       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 265                              "res == NULL, trying the secondary_free_list");
 266     }
 267     res = new_region_try_secondary_free_list(is_old);
 268   }
 269   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 270     // Currently, only attempts to allocate GC alloc regions set
 271     // do_expand to true. So, we should only reach here during a
 272     // safepoint. If this assumption changes we might have to
 273     // reconsider the use of _expand_heap_after_alloc_failure.
 274     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 275 
 276     ergo_verbose1(ErgoHeapSizing,
 277                   "attempt heap expansion",
 278                   ergo_format_reason("region allocation request failed")
 279                   ergo_format_byte("allocation request"),
 280                   word_size * HeapWordSize);
 281     if (expand(word_size * HeapWordSize)) {
 282       // Given that expand() succeeded in expanding the heap, and we
 283       // always expand the heap by an amount aligned to the heap
 284       // region size, the free list should in theory not be empty.
 285       // In any case, allocate_free_region() will check for NULL.
 286       res = _hrm.allocate_free_region(is_old);
 287     } else {
 288       _expand_heap_after_alloc_failure = false;
 289     }
 290   }
 291   return res;
 292 }
 293 
 294 HeapWord*
 295 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 296                                                            uint num_regions,
 297                                                            size_t word_size,
 298                                                            AllocationContext_t context) {
 299   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 300   assert(is_humongous(word_size), "word_size should be humongous");


 457     // potentially waits for regions from the secondary free list.
 458     wait_while_free_regions_coming();
 459     append_secondary_free_list_if_not_empty_with_lock();
 460 
 461     // Policy: Try only empty regions (i.e., already committed) first. Maybe we
 462     // are lucky enough to find some.
 463     first = _hrm.find_contiguous_only_empty(obj_regions);
 464     if (first != G1_NO_HRM_INDEX) {
 465       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 466     }
 467   }
 468 
 469   if (first == G1_NO_HRM_INDEX) {
 470     // Policy: We could not find enough regions for the humongous object in the
 471     // free list. Look through the heap to find a mix of free and uncommitted regions.
 472     // If we find such a mix, try expansion.
 473     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 474     if (first != G1_NO_HRM_INDEX) {
 475       // We found something. Make sure these regions are committed, i.e. expand
 476       // the heap. Alternatively we could do a defragmentation GC.
 477       ergo_verbose1(ErgoHeapSizing,
 478                     "attempt heap expansion",
 479                     ergo_format_reason("humongous allocation request failed")
 480                     ergo_format_byte("allocation request"),
 481                     word_size * HeapWordSize);
 482 
 483       _hrm.expand_at(first, obj_regions);
 484       g1_policy()->record_new_heap_size(num_regions());
 485 
 486 #ifdef ASSERT
 487       for (uint i = first; i < first + obj_regions; ++i) {
 488         HeapRegion* hr = region_at(i);
 489         assert(hr->is_free(), "sanity");
 490         assert(hr->is_empty(), "sanity");
 491         assert(is_on_master_free_list(hr), "sanity");
 492       }
 493 #endif
 494       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 495     } else {
 496       // Policy: Potentially trigger a defragmentation GC.
 497     }
 498   }
 499 
 500   HeapWord* result = NULL;
 501   if (first != G1_NO_HRM_INDEX) {
 502     result = humongous_obj_allocate_initialize_regions(first, obj_regions,


 780     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 781     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 782       start_address = start_region->end();
 783       if (start_address > last_address) {
 784         increase_used(word_size * HeapWordSize);
 785         start_region->set_top(last_address + 1);
 786         continue;
 787       }
 788       start_region->set_top(start_address);
 789       curr_range = MemRegion(start_address, last_address + 1);
 790       start_region = _hrm.addr_to_region(start_address);
 791     }
 792 
 793     // Perform the actual region allocation, exiting if it fails.
 794     // Then note how much new space we have allocated.
 795     if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
 796       return false;
 797     }
 798     increase_used(word_size * HeapWordSize);
 799     if (commits != 0) {
 800       ergo_verbose1(ErgoHeapSizing,
 801                     "attempt heap expansion",
 802                     ergo_format_reason("allocate archive regions")
 803                     ergo_format_byte("total size"),
 804                     HeapRegion::GrainWords * HeapWordSize * commits);
 805     }
 806 
 807     // Mark each G1 region touched by the range as archive, add it to the old set,
 808     // and set the allocation context and top.
 809     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 810     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 811     prev_last_region = last_region;
 812 
 813     while (curr_region != NULL) {
 814       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 815              "Region already in use (index %u)", curr_region->hrm_index());
 816       _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
 817       curr_region->set_allocation_context(AllocationContext::system());
 818       curr_region->set_archive();
 819       _old_set.add(curr_region);
 820       if (curr_region != last_region) {
 821         curr_region->set_top(curr_region->end());
 822         curr_region = _hrm.next_region_in_heap(curr_region);
 823       } else {
 824         curr_region->set_top(last_address + 1);


 965       guarantee(curr_region->is_archive(),
 966                 "Expected archive region at index %u", curr_region->hrm_index());
 967       uint curr_index = curr_region->hrm_index();
 968       _old_set.remove(curr_region);
 969       curr_region->set_free();
 970       curr_region->set_top(curr_region->bottom());
 971       if (curr_region != last_region) {
 972         curr_region = _hrm.next_region_in_heap(curr_region);
 973       } else {
 974         curr_region = NULL;
 975       }
 976       _hrm.shrink_at(curr_index, 1);
 977       uncommitted_regions++;
 978     }
 979 
 980     // Notify mark-sweep that this is no longer an archive range.
 981     G1MarkSweep::set_range_archive(ranges[i], false);
 982   }
 983 
 984   if (uncommitted_regions != 0) {
 985     ergo_verbose1(ErgoHeapSizing,
 986                   "attempt heap shrinking",
 987                   ergo_format_reason("uncommitted archive regions")
 988                   ergo_format_byte("total size"),
 989                   HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 990   }
 991   decrease_used(size_used);
 992 }
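
The excerpts above are from the archive-region support used by the shared-archive machinery. For orientation, a hedged sketch of how a client might drive the alloc/fill/dealloc entry points; the wrapper function is hypothetical and error handling is simplified.

// Hypothetical client sketch of the archive-region interface (not part of this file).
static void sketch_map_archive(G1CollectedHeap* g1h, MemRegion* ranges, size_t count) {
  if (g1h->alloc_archive_regions(ranges, count)) { // commit and pin the covering regions
    // ... copy the archived objects into the reserved ranges ...
    g1h->fill_archive_regions(ranges, count);      // fill gaps so the regions stay parsable
  }
  // If the archive later turns out to be unusable, the regions are handed back:
  //   g1h->dealloc_archive_regions(ranges, count); // uncommit and shrink, as shown above
}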
 993 
 994 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 995                                                         uint* gc_count_before_ret,
 996                                                         uint* gclocker_retry_count_ret) {
 997   // The structure of this method has a lot of similarities to
 998   // attempt_allocation_slow(). The reason these two were not merged
 999   // into a single one is that such a method would require several "if
1000   // allocation is not humongous do this, otherwise do that"
1001   // conditional paths which would obscure its flow. In fact, an early
1002   // version of this code did use a unified method which was harder to
1003   // follow and, as a result, it had subtle bugs that were hard to
1004   // track down. So keeping these two methods separate allows each to
1005   // be more readable. It will be good to keep these two in sync as
1006   // much as possible.
1007 
1008   assert_heap_not_locked_and_not_at_safepoint();


1211       } else {
1212         _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1213       }
1214     } else if (hr->is_continues_humongous()) {
1215       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1216     } else if (hr->is_archive()) {
1217       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1218     } else if (hr->is_old()) {
1219       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1220     } else {
1221       ShouldNotReachHere();
1222     }
1223     return false;
1224   }
1225 
1226   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1227     : _hr_printer(hr_printer) { }
1228 };
1229 
1230 void G1CollectedHeap::print_hrm_post_compaction() {
1231   PostCompactionPrinterClosure cl(hr_printer());
1232   heap_region_iterate(&cl);
1233 }
1234 
1235 bool G1CollectedHeap::do_collection(bool explicit_gc,
1236                                     bool clear_all_soft_refs,
1237                                     size_t word_size) {
1238   assert_at_safepoint(true /* should_be_vm_thread */);
1239 
1240   if (GC_locker::check_active_before_gc()) {
1241     return false;
1242   }
1243 
1244   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1245   gc_timer->register_gc_start();
1246 
1247   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1248   GCIdMark gc_id_mark;
1249   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1250 
1251   SvcGCMarker sgcm(SvcGCMarker::FULL);
1252   ResourceMark rm;
1253 
1254   G1Log::update_level();
1255   print_heap_before_gc();
1256   trace_heap_before_gc(gc_tracer);
1257 
1258   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1259 
1260   verify_region_sets_optional();
1261 
1262   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1263                            collector_policy()->should_clear_all_soft_refs();
1264 
1265   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1266 
1267   {
1268     IsGCActiveMark x;
1269 
1270     // Timing
1271     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1272     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1273 
1274     {
1275       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1276       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1277       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1278 
1279       g1_policy()->record_full_collection_start();
1280 
1281       // Note: When we have a more flexible GC logging framework that
1282       // allows us to add optional attributes to a GC log record we
1283       // could consider timing and reporting how long we wait in the
1284       // following two methods.
1285       wait_while_free_regions_coming();
1286       // If we start the compaction before the CM threads finish
1287       // scanning the root regions we might trip them over as we'll
1288       // be moving objects / updating references. So let's wait until
1289       // they are done. By telling them to abort, they should complete
1290       // early.
1291       _cm->root_regions()->abort();
1292       _cm->root_regions()->wait_until_scan_finished();
1293       append_secondary_free_list_if_not_empty_with_lock();
1294 
1295       gc_prologue(true);


1306 #if defined(COMPILER2) || INCLUDE_JVMCI
1307       DerivedPointerTable::clear();
1308 #endif
1309 
1310       // Disable discovery and empty the discovered lists
1311       // for the CM ref processor.
1312       ref_processor_cm()->disable_discovery();
1313       ref_processor_cm()->abandon_partial_discovery();
1314       ref_processor_cm()->verify_no_references_recorded();
1315 
1316       // Abandon current iterations of concurrent marking and concurrent
1317       // refinement, if any are in progress. We have to do this before
1318       // wait_until_scan_finished() below.
1319       concurrent_mark()->abort();
1320 
1321       // Make sure we'll choose a new allocation region afterwards.
1322       _allocator->release_mutator_alloc_region();
1323       _allocator->abandon_gc_alloc_regions();
1324       g1_rem_set()->cleanupHRRS();
1325 
1326       // We should call this after we retire any currently active alloc
1327       // regions so that all the ALLOC / RETIRE events are generated
1328       // before the start GC event.
1329       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1330 
1331       // We may have added regions to the current incremental collection
1332       // set between the last GC or pause and now. We need to clear the
1333       // incremental collection set and then start rebuilding it afresh
1334       // after this full GC.
1335       abandon_collection_set(g1_policy()->inc_cset_head());
1336       g1_policy()->clear_incremental_cset();
1337       g1_policy()->stop_incremental_cset_building();
1338 
1339       tear_down_region_sets(false /* free_list_only */);
1340       collector_state()->set_gcs_are_young(true);
1341 
1342       // See the comments in g1CollectedHeap.hpp and
1343       // G1CollectedHeap::ref_processing_init() about
1344       // how reference processing currently works in G1.
1345 
1346       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1347       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1348 
1349       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1350       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);


1378       ClassLoaderDataGraph::purge();
1379       MetaspaceAux::verify_metrics();
1380 
1381       // Note: since we've just done a full GC, concurrent
1382       // marking is no longer active. Therefore we need not
1383       // re-enable reference discovery for the CM ref processor.
1384       // That will be done at the start of the next marking cycle.
1385       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1386       ref_processor_cm()->verify_no_references_recorded();
1387 
1388       reset_gc_time_stamp();
1389       // Since everything potentially moved, we will clear all remembered
1390       // sets, and clear all cards.  Later we will rebuild remembered
1391       // sets. We will also reset the GC time stamps of the regions.
1392       clear_rsets_post_compaction();
1393       check_gc_time_stamps();
1394 
1395       // Resize the heap if necessary.
1396       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1397 
1398       if (_hr_printer.is_active()) {
1399         // We should do this after we potentially resize the heap so
1400         // that all the COMMIT / UNCOMMIT events are generated before
1401         // the end GC event.
1402 
1403         print_hrm_post_compaction();
1404         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1405       }
1406 
1407       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1408       if (hot_card_cache->use_cache()) {
1409         hot_card_cache->reset_card_counts();
1410         hot_card_cache->reset_hot_cache();
1411       }
1412 
1413       // Rebuild remembered sets of all regions.
1414       uint n_workers =
1415         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1416                                                 workers()->active_workers(),
1417                                                 Threads::number_of_non_daemon_threads());
1418       workers()->set_active_workers(n_workers);
1419 
1420       ParRebuildRSTask rebuild_rs_task(this);
1421       workers()->run_task(&rebuild_rs_task);
1422 
1423       // Rebuild the strong code root lists for each region
1424       rebuild_strong_code_roots();
1425 


1454       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1455       // objects marked during a full GC against the previous bitmap.
1456       // But we need to clear it before calling check_bitmaps below since
1457       // the full GC has compacted objects and updated TAMS but not updated
1458       // the prev bitmap.
1459       if (G1VerifyBitmaps) {
1460         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1461       }
1462       check_bitmaps("Full GC End");
1463 
1464       // Start a new incremental collection set for the next pause
1465       assert(g1_policy()->collection_set() == NULL, "must be");
1466       g1_policy()->start_incremental_cset_building();
1467 
1468       clear_cset_fast_test();
1469 
1470       _allocator->init_mutator_alloc_region();
1471 
1472       g1_policy()->record_full_collection_end();
1473 
1474       if (G1Log::fine()) {
1475         g1_policy()->print_heap_transition();
1476       }
1477 
1478       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1479       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1480       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1481       // before any GC notifications are raised.
1482       g1mm()->update_sizes();
1483 
1484       gc_epilogue(true);
1485     }
1486 
1487     if (G1Log::finer()) {
1488       g1_policy()->print_detailed_heap_transition(true /* full */);
1489     }
1490 
1491     print_heap_after_gc();
1492     trace_heap_after_gc(gc_tracer);
1493 
1494     post_full_gc_dump(gc_timer);
1495 
1496     gc_timer->register_gc_end();
1497     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1498   }
1499 
1500   return true;
1501 }
1502 
1503 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1504   // do_collection() will return whether it succeeded in performing
1505   // the GC. Currently, there is no facility in the
1506   // do_full_collection() API to notify the caller that the collection
1507   // did not succeed (e.g., because it was locked out by the GC
1508   // locker). So, right now, we'll ignore the return value.
1509   bool dummy = do_collection(true,                /* explicit_gc */


1554 
1555   // This assert only makes sense here, before we adjust them
1556   // with respect to the min and max heap size.
1557   assert(minimum_desired_capacity <= maximum_desired_capacity,
1558          "minimum_desired_capacity = " SIZE_FORMAT ", "
1559          "maximum_desired_capacity = " SIZE_FORMAT,
1560          minimum_desired_capacity, maximum_desired_capacity);
1561 
1562   // Should not be greater than the heap max size. No need to adjust
1563   // it with respect to the heap min size as it's a lower bound (i.e.,
1564   // we'll try to make the capacity larger than it, not smaller).
1565   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1566   // Should not be less than the heap min size. No need to adjust it
1567   // with respect to the heap max size as it's an upper bound (i.e.,
1568   // we'll try to make the capacity smaller than it, not greater).
1569   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1570 
1571   if (capacity_after_gc < minimum_desired_capacity) {
1572     // Don't expand unless it's significant
1573     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1574     ergo_verbose4(ErgoHeapSizing,
1575                   "attempt heap expansion",
1576                   ergo_format_reason("capacity lower than "
1577                                      "min desired capacity after Full GC")
1578                   ergo_format_byte("capacity")
1579                   ergo_format_byte("occupancy")
1580                   ergo_format_byte_perc("min desired capacity"),
1581                   capacity_after_gc, used_after_gc,
1582                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1583     expand(expand_bytes);
1584 
1585     // No expansion, now see if we want to shrink
1586   } else if (capacity_after_gc > maximum_desired_capacity) {
1587     // Capacity too large, compute shrinking size
1588     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1589     ergo_verbose4(ErgoHeapSizing,
1590                   "attempt heap shrinking",
1591                   ergo_format_reason("capacity higher than "
1592                                      "max desired capacity after Full GC")
1593                   ergo_format_byte("capacity")
1594                   ergo_format_byte("occupancy")
1595                   ergo_format_byte_perc("max desired capacity"),
1596                   capacity_after_gc, used_after_gc,
1597                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1598     shrink(shrink_bytes);
1599   }
1600 }
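
The minimum and maximum desired capacities used above are derived from the post-GC occupancy and the MinHeapFreeRatio / MaxHeapFreeRatio flags. A hedged sketch of that arithmetic in a hypothetical helper; the real computation also guards against overflow.

// Sketch of the sizing arithmetic behind the desired capacities above (hypothetical helper).
static void sketch_desired_capacities(size_t used_after_gc,
                                      size_t* minimum_desired_capacity,
                                      size_t* maximum_desired_capacity) {
  const double maximum_used_percentage = 1.0 - (double) MinHeapFreeRatio / 100.0;
  const double minimum_used_percentage = 1.0 - (double) MaxHeapFreeRatio / 100.0;
  // Keep at least MinHeapFreeRatio percent free: used / capacity <= maximum_used_percentage.
  *minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage);
  // Keep at most MaxHeapFreeRatio percent free: used / capacity >= minimum_used_percentage.
  *maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage);
}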
1601 
1602 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1603                                                             AllocationContext_t context,
1604                                                             bool do_gc,
1605                                                             bool clear_all_soft_refs,
1606                                                             bool expect_null_mutator_alloc_region,
1607                                                             bool* gc_succeeded) {
1608   *gc_succeeded = true;
1609   // Let's attempt the allocation first.
1610   HeapWord* result =
1611     attempt_allocation_at_safepoint(word_size,
1612                                     context,
1613                                     expect_null_mutator_alloc_region);
1614   if (result != NULL) {
1615     assert(*gc_succeeded, "sanity");
1616     return result;
1617   }


1684 
1685   // What else?  We might try synchronous finalization later.  If the total
1686   // space available is large enough for the allocation, then a more
1687   // complete compaction phase than we've tried so far might be
1688   // appropriate.
1689   assert(*succeeded, "sanity");
1690   return NULL;
1691 }
1692 
1693 // Attempt to expand the heap sufficiently
1694 // to support an allocation of the given "word_size".  If
1695 // successful, perform the allocation and return the address of the
1696 // allocated block, or else "NULL".
1697 
1698 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1699   assert_at_safepoint(true /* should_be_vm_thread */);
1700 
1701   verify_region_sets_optional();
1702 
1703   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1704   ergo_verbose1(ErgoHeapSizing,
1705                 "attempt heap expansion",
1706                 ergo_format_reason("allocation request failed")
1707                 ergo_format_byte("allocation request"),
1708                 word_size * HeapWordSize);
1709   if (expand(expand_bytes)) {
1710     _hrm.verify_optional();
1711     verify_region_sets_optional();
1712     return attempt_allocation_at_safepoint(word_size,
1713                                            context,
1714                                            false /* expect_null_mutator_alloc_region */);
1715   }
1716   return NULL;
1717 }
1718 
1719 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1720   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1721   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1722                                        HeapRegion::GrainBytes);
1723   ergo_verbose2(ErgoHeapSizing,
1724                 "expand the heap",
1725                 ergo_format_byte("requested expansion amount")
1726                 ergo_format_byte("attempted expansion amount"),
1727                 expand_bytes, aligned_expand_bytes);
1728 
1729   if (is_maximal_no_gc()) {
1730     ergo_verbose0(ErgoHeapSizing,
1731                       "did not expand the heap",
1732                       ergo_format_reason("heap already fully expanded"));
1733     return false;
1734   }
1735 
1736   double expand_heap_start_time_sec = os::elapsedTime();
1737   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1738   assert(regions_to_expand > 0, "Must expand by at least one region");
1739 
1740   uint expanded_by = _hrm.expand_by(regions_to_expand);
1741   if (expand_time_ms != NULL) {
1742     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1743   }
1744 
1745   if (expanded_by > 0) {
1746     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1747     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1748     g1_policy()->record_new_heap_size(num_regions());
1749   } else {
1750     ergo_verbose0(ErgoHeapSizing,
1751                   "did not expand the heap",
1752                   ergo_format_reason("heap expansion operation failed"));
1753     // The expansion of the virtual storage space was unsuccessful.
1754     // Let's see if it was because we ran out of swap.
1755     if (G1ExitOnExpansionFailure &&
1756         _hrm.available() >= regions_to_expand) {
1757       // We had head room...
1758       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1759     }
1760   }
1761   return regions_to_expand > 0;
1762 }
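
A worked illustration of the rounding at the top of expand(); the concrete sizes are examples only (the region size varies with the heap size).

// Worked example of the rounding in expand(), assuming 32 MB regions (illustrative):
//   expand_bytes                                 =  5 MB   (caller's request)
//   ReservedSpace::page_align_size_up(5 MB)      =  5 MB   (already page aligned)
//   align_size_up(5 MB, HeapRegion::GrainBytes)  = 32 MB   (round up to whole regions)
//   regions_to_expand = 32 MB / GrainBytes       =  1
// so _hrm.expand_by(1) tries to commit one more region, if any remain uncommitted.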
1763 
1764 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1765   size_t aligned_shrink_bytes =
1766     ReservedSpace::page_align_size_down(shrink_bytes);
1767   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1768                                          HeapRegion::GrainBytes);
1769   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1770 
1771   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1772   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1773 
1774   ergo_verbose3(ErgoHeapSizing,
1775                 "shrink the heap",
1776                 ergo_format_byte("requested shrinking amount")
1777                 ergo_format_byte("aligned shrinking amount")
1778                 ergo_format_byte("attempted shrinking amount"),
1779                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1780   if (num_regions_removed > 0) {
1781     g1_policy()->record_new_heap_size(num_regions());
1782   } else {
1783     ergo_verbose0(ErgoHeapSizing,
1784                   "did not shrink the heap",
1785                   ergo_format_reason("heap shrinking operation failed"));
1786   }
1787 }
1788 
1789 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1790   verify_region_sets_optional();
1791 
1792   // We should only reach here at the end of a Full GC which means we
1793   // should not be holding on to any GC alloc regions. The method
1794   // below will make sure of that and do any remaining clean up.
1795   _allocator->abandon_gc_alloc_regions();
1796 
1797   // Instead of tearing down / rebuilding the free lists here, we
1798   // could instead use the remove_all_pending() method on free_list to
1799   // remove only the ones that we need to remove.
1800   tear_down_region_sets(true /* free_list_only */);
1801   shrink_helper(shrink_bytes);
1802   rebuild_region_sets(true /* free_list_only */);
1803 
1804   _hrm.verify_optional();
1805   verify_region_sets_optional();


1878   // Initialize the G1EvacuationFailureALot counters and flags.
1879   NOT_PRODUCT(reset_evacuation_should_fail();)
1880 
1881   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1882 }
1883 
1884 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1885                                                                  size_t size,
1886                                                                  size_t translation_factor) {
1887   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1888   // Allocate a new reserved space, preferring to use large pages.
1889   ReservedSpace rs(size, preferred_page_size);
1890   G1RegionToSpaceMapper* result  =
1891     G1RegionToSpaceMapper::create_mapper(rs,
1892                                          size,
1893                                          rs.alignment(),
1894                                          HeapRegion::GrainBytes,
1895                                          translation_factor,
1896                                          mtGC);
1897   if (TracePageSizes) {
1898     gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1899                            description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1900   }
1901   return result;
1902 }
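
For context, initialize() (below) uses this helper to back auxiliary GC data structures such as the block offset table and the mark bitmaps. A hedged, comment-only sketch of such a call; the size and factor names are placeholders rather than the exact expressions used there.

// Comment-only sketch of a call site in initialize() (exact arguments differ):
//   size_t bot_size   = ...;  // bytes needed to describe the whole reserved heap
//   size_t bot_factor = ...;  // bytes of heap covered per byte of the table
//   G1RegionToSpaceMapper* bot_storage =
//     create_aux_memory_mapper("Block offset table", bot_size, bot_factor);
// The returned mapper commits and uncommits its backing storage in step with the
// heap regions it covers.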
1903 
1904 jint G1CollectedHeap::initialize() {
1905   CollectedHeap::pre_initialize();
1906   os::enable_vtime();
1907 
1908   G1Log::init();
1909 
1910   // Necessary to satisfy locking discipline assertions.
1911 
1912   MutexLocker x(Heap_lock);
1913 
1914   // We have to initialize the printer before committing the heap, as
1915   // it will be used then.
1916   _hr_printer.set_active(G1PrintHeapRegions);
1917 
1918   // While there are no constraints in the GC code that HeapWordSize
1919   // be any particular value, there are multiple other areas in the
1920   // system which assume that HeapWordSize == wordSize (e.g. oop->object_size in some
1921   // cases incorrectly returns the size in wordSize units rather than
1922   // HeapWordSize).
1923   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1924 
1925   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1926   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1927   size_t heap_alignment = collector_policy()->heap_alignment();
1928 
1929   // Ensure that the sizes are properly aligned.
1930   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1931   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1932   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1933 
1934   _refine_cte_cl = new RefineCardTableEntryClosure();
1935 
1936   jint ecode = JNI_OK;
1937   _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);


2100   G1AllocRegion::setup(this, dummy_region);
2101 
2102   _allocator->init_mutator_alloc_region();
2103 
2104   // Create the monitoring and management support now that the
2105   // values in the heap have been properly initialized.
2106   _g1mm = new G1MonitoringSupport(this);
2107 
2108   G1StringDedup::initialize();
2109 
2110   _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2111   for (uint i = 0; i < ParallelGCThreads; i++) {
2112     new (&_preserved_objs[i]) OopAndMarkOopStack();
2113   }
2114 
2115   return JNI_OK;
2116 }
2117 
2118 void G1CollectedHeap::stop() {
2119   // Stop all concurrent threads. We do this to make sure these threads
2120   // do not continue to execute and access resources (e.g. gclog_or_tty)
2121   // that are destroyed during shutdown.
2122   _cg1r->stop();
2123   _cmThread->stop();
2124   if (G1StringDedup::is_enabled()) {
2125     G1StringDedup::stop();
2126   }
2127 }
2128 
2129 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2130   return HeapRegion::max_region_size();
2131 }
2132 
2133 void G1CollectedHeap::post_initialize() {
2134   CollectedHeap::post_initialize();
2135   ref_processing_init();
2136 }
2137 
2138 void G1CollectedHeap::ref_processing_init() {
2139   // Reference processing in G1 currently works as follows:
2140   //


2227       assert(chr->is_continues_humongous(), "sanity");
2228       chr->reset_gc_time_stamp();
2229     }
2230   }
2231 }
2232 
2233 #ifndef PRODUCT
2234 
2235 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2236 private:
2237   unsigned _gc_time_stamp;
2238   bool _failures;
2239 
2240 public:
2241   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2242     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2243 
2244   virtual bool doHeapRegion(HeapRegion* hr) {
2245     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2246     if (_gc_time_stamp != region_gc_time_stamp) {
2247       gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2248                              "expected %d", HR_FORMAT_PARAMS(hr),
2249                              region_gc_time_stamp, _gc_time_stamp);
2250       _failures = true;
2251     }
2252     return false;
2253   }
2254 
2255   bool failures() { return _failures; }
2256 };
2257 
2258 void G1CollectedHeap::check_gc_time_stamps() {
2259   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2260   heap_region_iterate(&cl);
2261   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2262 }
2263 #endif // PRODUCT
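
The closure-plus-heap_region_iterate() pattern used above (and throughout this file) is the standard way to visit every region; a minimal, hypothetical example:

// Hypothetical example of the HeapRegionClosure pattern (not part of this file).
class SketchCountFreeRegionsClosure : public HeapRegionClosure {
  size_t _free;
public:
  SketchCountFreeRegionsClosure() : _free(0) { }
  virtual bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_free()) {
      _free++;
    }
    return false; // returning false continues the iteration over the remaining regions
  }
  size_t free_regions() const { return _free; }
};
// Passing an instance to G1CollectedHeap::heap()->heap_region_iterate(&cl) visits each region once.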
2264 
2265 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2266   _cg1r->hot_card_cache()->drain(cl, worker_i);
2267 }
2268 


2814 private:
2815   G1CollectedHeap* _g1h;
2816   VerifyOption     _vo;
2817   bool             _failures;
2818 public:
2819   // _vo == UsePrevMarking -> use "prev" marking information,
2820   // _vo == UseNextMarking -> use "next" marking information,
2821   // _vo == UseMarkWord    -> use mark word from object header.
2822   VerifyRootsClosure(VerifyOption vo) :
2823     _g1h(G1CollectedHeap::heap()),
2824     _vo(vo),
2825     _failures(false) { }
2826 
2827   bool failures() { return _failures; }
2828 
2829   template <class T> void do_oop_nv(T* p) {
2830     T heap_oop = oopDesc::load_heap_oop(p);
2831     if (!oopDesc::is_null(heap_oop)) {
2832       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2833       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2834         gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
2835                                "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2836         if (_vo == VerifyOption_G1UseMarkWord) {
2837           gclog_or_tty->print_cr("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2838         }
2839         obj->print_on(gclog_or_tty);
2840         _failures = true;
2841       }
2842     }
2843   }
2844 
2845   void do_oop(oop* p)       { do_oop_nv(p); }
2846   void do_oop(narrowOop* p) { do_oop_nv(p); }
2847 };
2848 
2849 class G1VerifyCodeRootOopClosure: public OopClosure {
2850   G1CollectedHeap* _g1h;
2851   OopClosure* _root_cl;
2852   nmethod* _nm;
2853   VerifyOption _vo;
2854   bool _failures;
2855 
2856   template <class T> void do_oop_work(T* p) {
2857     // First verify that this root is live
2858     _root_cl->do_oop(p);
2859 


2864 
2865     // Don't check the code roots during marking verification in a full GC
2866     if (_vo == VerifyOption_G1UseMarkWord) {
2867       return;
2868     }
2869 
2870     // Now verify that the current nmethod (which contains p) is
2871     // in the code root list of the heap region containing the
2872     // object referenced by p.
2873 
2874     T heap_oop = oopDesc::load_heap_oop(p);
2875     if (!oopDesc::is_null(heap_oop)) {
2876       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2877 
2878       // Now fetch the region containing the object
2879       HeapRegion* hr = _g1h->heap_region_containing(obj);
2880       HeapRegionRemSet* hrrs = hr->rem_set();
2881       // Verify that the strong code root list for this region
2882       // contains the nmethod
2883       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2884         gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
2885                                "from nmethod " PTR_FORMAT " not in strong "
2886                                "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2887                                p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2888         _failures = true;
2889       }
2890     }
2891   }
2892 
2893 public:
2894   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2895     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2896 
2897   void do_oop(oop* p) { do_oop_work(p); }
2898   void do_oop(narrowOop* p) { do_oop_work(p); }
2899 
2900   void set_nmethod(nmethod* nm) { _nm = nm; }
2901   bool failures() { return _failures; }
2902 };
2903 
2904 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {


3045   }
3046 
3047   bool doHeapRegion(HeapRegion* r) {
3048     // For archive regions, verify there are no heap pointers to
3049     // non-pinned regions. For all others, verify liveness info.
3050     if (r->is_archive()) {
3051       VerifyArchiveRegionClosure verify_oop_pointers(r);
3052       r->object_iterate(&verify_oop_pointers);
3053       return true;
3054     }
3055     if (!r->is_continues_humongous()) {
3056       bool failures = false;
3057       r->verify(_vo, &failures);
3058       if (failures) {
3059         _failures = true;
3060       } else {
3061         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3062         r->object_iterate(&not_dead_yet_cl);
3063         if (_vo != VerifyOption_G1UseNextMarking) {
3064           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3065             gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3066                                    "max_live_bytes " SIZE_FORMAT " "
3067                                    "< calculated " SIZE_FORMAT,
3068                                    p2i(r->bottom()), p2i(r->end()),
3069                                    r->max_live_bytes(),
3070                                  not_dead_yet_cl.live_bytes());
3071             _failures = true;
3072           }
3073         } else {
3074           // When vo == UseNextMarking we cannot currently do a sanity
3075           // check on the live bytes as the calculation has not been
3076           // finalized yet.
3077         }
3078       }
3079     }
3080     return false; // stop the region iteration if we hit a failure
3081   }
3082 };
3083 
3084 // This is the task used for parallel verification of the heap regions
3085 
3086 class G1ParVerifyTask: public AbstractGangTask {
3087 private:
3088   G1CollectedHeap*  _g1h;
3089   VerifyOption      _vo;
3090   bool              _failures;


3098       AbstractGangTask("Parallel verify task"),
3099       _g1h(g1h),
3100       _vo(vo),
3101       _failures(false),
3102       _hrclaimer(g1h->workers()->active_workers()) {}
3103 
3104   bool failures() {
3105     return _failures;
3106   }
3107 
3108   void work(uint worker_id) {
3109     HandleMark hm;
3110     VerifyRegionClosure blk(true, _vo);
3111     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3112     if (blk.failures()) {
3113       _failures = true;
3114     }
3115   }
3116 };
3117 
3118 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3119   if (SafepointSynchronize::is_at_safepoint()) {
3120     assert(Thread::current()->is_VM_thread(),
3121            "Expected to be executed serially by the VM thread at this point");
3122 
3123     if (!silent) { gclog_or_tty->print("Roots "); }
3124     VerifyRootsClosure rootsCl(vo);
3125     VerifyKlassClosure klassCl(this, &rootsCl);
3126     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3127 
3128     // We apply the relevant closures to all the oops in the
3129     // system dictionary, class loader data graph, the string table
3130     // and the nmethods in the code cache.
3131     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3132     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3133 
3134     {
3135       G1RootProcessor root_processor(this, 1);
3136       root_processor.process_all_roots(&rootsCl,
3137                                        &cldCl,
3138                                        &blobsCl);
3139     }
3140 
3141     bool failures = rootsCl.failures() || codeRootsCl.failures();
3142 
3143     if (vo != VerifyOption_G1UseMarkWord) {
3144       // If we're verifying during a full GC then the region sets
3145       // will have been torn down at the start of the GC. Therefore
3146       // verifying the region sets will fail. So we only verify
3147       // the region sets when not in a full GC.
3148       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3149       verify_region_sets();
3150     }
3151 
3152     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3153     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3154 
3155       G1ParVerifyTask task(this, vo);
3156       workers()->run_task(&task);
3157       if (task.failures()) {
3158         failures = true;
3159       }
3160 
3161     } else {
3162       VerifyRegionClosure blk(false, vo);
3163       heap_region_iterate(&blk);
3164       if (blk.failures()) {
3165         failures = true;
3166       }
3167     }
3168 
3169     if (G1StringDedup::is_enabled()) {
3170       if (!silent) gclog_or_tty->print("StrDedup ");
3171       G1StringDedup::verify();
3172     }
3173 
3174     if (failures) {
3175       gclog_or_tty->print_cr("Heap:");
3176       // It helps to have the per-region information in the output to
3177       // help us track down what went wrong. This is why we call
3178       // print_extended_on() instead of print_on().
3179       print_extended_on(gclog_or_tty);
3180       gclog_or_tty->cr();
3181       gclog_or_tty->flush();
3182     }
3183     guarantee(!failures, "there should not have been any failures");
3184   } else {
3185     if (!silent) {
3186       gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3187       if (G1StringDedup::is_enabled()) {
3188         gclog_or_tty->print(", StrDedup");
3189       }
3190       gclog_or_tty->print(") ");
3191     }
3192   }
3193 }
3194 
3195 void G1CollectedHeap::verify(bool silent) {
3196   verify(silent, VerifyOption_G1UsePrevMarking);
3197 }
3198 
3199 double G1CollectedHeap::verify(bool guard, const char* msg) {
3200   double verify_time_ms = 0.0;
3201 
3202   if (guard && total_collections() >= VerifyGCStartAt) {
3203     double verify_start = os::elapsedTime();
3204     HandleMark hm;  // Discard invalid handles created during verification
3205     prepare_for_verify();
3206     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3207     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3208   }
3209 
3210   return verify_time_ms;
3211 }
3212 
3213 void G1CollectedHeap::verify_before_gc() {
3214   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3215   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3216 }
3217 
3218 void G1CollectedHeap::verify_after_gc() {
3219   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3220   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3221 }
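
For reference, the guards above are driven by existing HotSpot flags; a short, comment-only illustration of how they interact with verify(guard, msg):

// Illustrative flag usage for the verification hooks above (comment-only sketch):
//   -XX:+VerifyBeforeGC -XX:+VerifyAfterGC -XX:VerifyGCStartAt=1
// With these options, verify_before_gc()/verify_after_gc() call Universe::verify()
// for every collection whose total_collections() count has reached VerifyGCStartAt,
// and the time spent is recorded in the G1 phase times as shown above.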
3222 
3223 class PrintRegionClosure: public HeapRegionClosure {
3224   outputStream* _st;
3225 public:
3226   PrintRegionClosure(outputStream* st) : _st(st) {}
3227   bool doHeapRegion(HeapRegion* r) {
3228     r->print_on(_st);
3229     return false;
3230   }
3231 };
3232 
3233 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3234                                        const HeapRegion* hr,
3235                                        const VerifyOption vo) const {
3236   switch (vo) {
3237   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3238   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3239   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();


3309     G1StringDedup::print_worker_threads_on(st);
3310   }
3311 }
3312 
3313 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3314   workers()->threads_do(tc);
3315   tc->do_thread(_cmThread);
3316   _cg1r->threads_do(tc);
3317   if (G1StringDedup::is_enabled()) {
3318     G1StringDedup::threads_do(tc);
3319   }
3320 }
3321 
3322 void G1CollectedHeap::print_tracing_info() const {
3323   // We'll overload this to mean "trace GC pause statistics."
3324   if (TraceYoungGenTime || TraceOldGenTime) {
3325     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3326     // to that.
3327     g1_policy()->print_tracing_info();
3328   }
3329   if (G1SummarizeRSetStats) {
3330     g1_rem_set()->print_summary_info();
3331   }
3332   if (G1SummarizeConcMark) {
3333     concurrent_mark()->print_summary_info();
3334   }
3335   g1_policy()->print_yg_surv_rate_info();
3336 }
3337 
3338 #ifndef PRODUCT
3339 // Helpful for debugging RSet issues.
3340 
3341 class PrintRSetsClosure : public HeapRegionClosure {
3342 private:
3343   const char* _msg;
3344   size_t _occupied_sum;
3345 
3346 public:
3347   bool doHeapRegion(HeapRegion* r) {
3348     HeapRegionRemSet* hrrs = r->rem_set();
3349     size_t occupied = hrrs->occupied();
3350     _occupied_sum += occupied;
3351 
3352     gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3353                            HR_FORMAT_PARAMS(r));
3354     if (occupied == 0) {
3355       gclog_or_tty->print_cr("  RSet is empty");
3356     } else {
3357       hrrs->print();
3358     }
3359     gclog_or_tty->print_cr("----------");
3360     return false;
3361   }
3362 
3363   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3364     gclog_or_tty->cr();
3365     gclog_or_tty->print_cr("========================================");
3366     gclog_or_tty->print_cr("%s", msg);
3367     gclog_or_tty->cr();
3368   }
3369 
3370   ~PrintRSetsClosure() {
3371     gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3372     gclog_or_tty->print_cr("========================================");
3373     gclog_or_tty->cr();
3374   }
3375 };
3376 
3377 void G1CollectedHeap::print_cset_rsets() {
3378   PrintRSetsClosure cl("Printing CSet RSets");
3379   collection_set_iterate(&cl);
3380 }
3381 
3382 void G1CollectedHeap::print_all_rsets() {
3383   PrintRSetsClosure cl("Printing All RSets");
3384   heap_region_iterate(&cl);
3385 }
3386 #endif // PRODUCT
3387 
3388 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3389   YoungList* young_list = heap()->young_list();
3390 
3391   size_t eden_used_bytes = young_list->eden_used_bytes();
3392   size_t survivor_used_bytes = young_list->survivor_used_bytes();
3393 


3411 
3412   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3413   gc_tracer->report_metaspace_summary(when, metaspace_summary);
3414 }
3415 
3416 
3417 G1CollectedHeap* G1CollectedHeap::heap() {
3418   CollectedHeap* heap = Universe::heap();
3419   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3420   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3421   return (G1CollectedHeap*)heap;
3422 }
3423 
3424 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3425   // always_do_update_barrier = false;
3426   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3427   // Fill TLAB's and such
3428   accumulate_statistics_all_tlabs();
3429   ensure_parsability(true);
3430 
3431   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3432       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3433     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3434   }
3435 }
3436 
3437 void G1CollectedHeap::gc_epilogue(bool full) {
3438 
3439   if (G1SummarizeRSetStats &&
3440       (G1SummarizeRSetStatsPeriod > 0) &&
3441       // we are at the end of the GC. total_collections() has already been incremented.
3442       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3443     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3444   }
3445 
3446   // FIXME: what is this about?
3447   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3448   // is set.
3449 #if defined(COMPILER2) || INCLUDE_JVMCI
3450   assert(DerivedPointerTable::is_empty(), "derived pointer present");
3451 #endif
3452   // always_do_update_barrier = true;
3453 
3454   resize_all_tlabs();
3455   allocation_context_stats().update(full);
3456 
3457   // We have just completed a GC. Update the soft reference
3458   // policy with the new heap occupancy
3459   Universe::update_heap_info_at_gc();
3460 }
3461 
3462 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3463                                                uint gc_count_before,
3464                                                bool* succeeded,


3667     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3668 
3669     // Here's a good place to add any other checks we'd like to
3670     // perform on CSet regions.
3671     return false;
3672   }
3673 };
3674 #endif // ASSERT
3675 
3676 uint G1CollectedHeap::num_task_queues() const {
3677   return _task_queues->size();
3678 }
3679 
3680 #if TASKQUEUE_STATS
3681 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3682   st->print_raw_cr("GC Task Stats");
3683   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3684   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3685 }
3686 
3687 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3688   print_taskqueue_stats_hdr(st);
3689 
3690   TaskQueueStats totals;
3691   const uint n = num_task_queues();
3692   for (uint i = 0; i < n; ++i) {
3693     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3694     totals += task_queue(i)->stats;
3695   }
3696   st->print_raw("tot "); totals.print(st); st->cr();
3697 
3698   DEBUG_ONLY(totals.verify());
3699 }
3700 
3701 void G1CollectedHeap::reset_taskqueue_stats() {
3702   const uint n = num_task_queues();
3703   for (uint i = 0; i < n; ++i) {
3704     task_queue(i)->stats.reset();
3705   }
3706 }
3707 #endif // TASKQUEUE_STATS
3708 
3709 void G1CollectedHeap::log_gc_header() {
3710   if (!G1Log::fine()) {
3711     return;
3712   }
3713 
3714   gclog_or_tty->gclog_stamp();
3715 
3716   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3717     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3718     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3719 
3720   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3721 }
3722 
3723 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3724   if (!G1Log::fine()) {
3725     return;
3726   }
3727 
3728   if (G1Log::finer()) {
3729     if (evacuation_failed()) {
3730       gclog_or_tty->print(" (to-space exhausted)");
3731     }
3732     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);

3733     g1_policy()->print_phases(pause_time_sec);

3734     g1_policy()->print_detailed_heap_transition();
3735   } else {
3736     if (evacuation_failed()) {
3737       gclog_or_tty->print("--");
3738     }
3739     g1_policy()->print_heap_transition();
3740     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3741   }
3742   gclog_or_tty->flush();
3743 }
3744 

3745 void G1CollectedHeap::wait_for_root_region_scanning() {
3746   double scan_wait_start = os::elapsedTime();
3747   // We have to wait until the CM threads finish scanning the
3748   // root regions as it's the only way to ensure that all the
3749   // objects on them have been correctly scanned before we start
3750   // moving them during the GC.
3751   bool waited = _cm->root_regions()->wait_until_scan_finished();
3752   double wait_time_ms = 0.0;
3753   if (waited) {
3754     double scan_wait_end = os::elapsedTime();
3755     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3756   }
3757   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3758 }
3759 
3760 bool
3761 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3762   assert_at_safepoint(true /* should_be_vm_thread */);
3763   guarantee(!is_gc_active(), "collection is not reentrant");
3764 
3765   if (GC_locker::check_active_before_gc()) {
3766     return false;
3767   }
3768 
3769   _gc_timer_stw->register_gc_start();
3770 
3771   GCIdMark gc_id_mark;
3772   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3773 
3774   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3775   ResourceMark rm;
3776 
3777   wait_for_root_region_scanning();
3778 
3779   G1Log::update_level();
3780   print_heap_before_gc();
3781   trace_heap_before_gc(_gc_tracer_stw);
3782 
3783   verify_region_sets_optional();
3784   verify_dirty_young_regions();
3785 
3786   // This call will decide whether this pause is an initial-mark
3787   // pause. If it is, during_initial_mark_pause() will return true
3788   // for the duration of this pause.
3789   g1_policy()->decide_on_conc_mark_initiation();
3790 
3791   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3792   assert(!collector_state()->during_initial_mark_pause() ||
3793           collector_state()->gcs_are_young(), "sanity");
3794 
3795   // We also do not allow mixed GCs during marking.
3796   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3797 
3798   // Record whether this pause is an initial mark. By the time the current
3799   // thread has completed its logging output and it is safe to signal
3800   // the CM thread, the flag's value in the policy will have been reset.
3801   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3802 
3803   // Inner scope for scope based logging, timers, and stats collection
3804   {
3805     EvacuationInfo evacuation_info;
3806 
3807     if (collector_state()->during_initial_mark_pause()) {
3808       // We are about to start a marking cycle, so we increment the
3809       // full collection counter.
3810       increment_old_marking_cycles_started();
3811       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3812     }
3813 
3814     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3815 
3816     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3817 
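         // Recompute how many parallel GC workers to use for this pause, based
         // on the total and currently active worker counts and the number of
         // non-daemon Java threads.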
3818     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3819                                                                   workers()->active_workers(),
3820                                                                   Threads::number_of_non_daemon_threads());
3821     workers()->set_active_workers(active_workers);






3822 
3823     double pause_start_sec = os::elapsedTime();

3824     g1_policy()->note_gc_start(active_workers);
3825     log_gc_header();
3826 
3827     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3828     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3829 
3830     // If the secondary_free_list is not empty, append it to the
3831     // free_list. No need to wait for the cleanup operation to finish;
3832     // the region allocation code will check the secondary_free_list
3833     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3834     // set, skip this step so that the region allocation code has to
3835     // get entries from the secondary_free_list.
3836     if (!G1StressConcRegionFreeing) {
3837       append_secondary_free_list_if_not_empty_with_lock();
3838     }
3839 
3840     assert(check_young_list_well_formed(), "young list should be well formed");
3841 
3842     // Don't dynamically change the number of GC threads this early.  A value of
3843     // 0 is used to indicate serial work.  When parallel work is done,
3844     // it will be set.
3845 


3859 #endif
3860 
3861       // Please see comment in g1CollectedHeap.hpp and
3862       // G1CollectedHeap::ref_processing_init() to see how
3863       // reference processing currently works in G1.
3864 
3865       // Enable discovery in the STW reference processor
3866       ref_processor_stw()->enable_discovery();
3867 
3868       {
3869         // We want to temporarily turn off discovery by the
3870         // CM ref processor, if necessary, and turn it back on
3871         // again later if we do. Using a scoped
3872         // NoRefDiscovery object will do this.
3873         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3874 
3875         // Forget the current alloc region (we might even choose it to be part
3876         // of the collection set!).
3877         _allocator->release_mutator_alloc_region();
3878 
3879         // We should call this after we retire the mutator alloc
3880         // region(s) so that all the ALLOC / RETIRE events are generated
3881         // before the start GC event.
3882         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3883 
3884         // This timing is only used by the ergonomics to handle our pause target.
3885         // It is unclear why this should not include the full pause. We will
3886         // investigate this in CR 7178365.
3887         //
3888         // Preserving the old comment here in case it helps the investigation:
3889         //
3890         // The elapsed time induced by the start time below deliberately elides
3891         // the possible verification above.
3892         double sample_start_time_sec = os::elapsedTime();
3893 
3894         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3895 
3896         if (collector_state()->during_initial_mark_pause()) {
3897           concurrent_mark()->checkpointRootsInitialPre();
3898         }
3899 
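             // Choose the collection set: the young part is sized against the
             // pause time target first; any remaining time budget goes to the
             // old (mixed GC) part.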
3900         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3901         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3902 
3903         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());


3980 
3981         if (collector_state()->during_initial_mark_pause()) {
3982           // We have to do this before we notify the CM threads that
3983           // they can start working to make sure that all the
3984           // appropriate initialization is done on the CM object.
3985           concurrent_mark()->checkpointRootsInitialPost();
3986           collector_state()->set_mark_in_progress(true);
3987           // Note that we don't actually trigger the CM thread at
3988           // this point. We do that later when we're sure that
3989           // the current thread has completed its logging output.
3990         }
3991 
3992         allocate_dummy_regions();
3993 
3994         _allocator->init_mutator_alloc_region();
3995 
3996         {
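               // Ask the policy whether (and by how much) to grow the heap after
               // this pause, and record how long the expansion itself took.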
3997           size_t expand_bytes = g1_policy()->expansion_amount();
3998           if (expand_bytes > 0) {
3999             size_t bytes_before = capacity();
4000             // No need for an ergo verbose message here,
4001             // expansion_amount() does this when it returns a value > 0.
4002             double expand_ms;
4003             if (!expand(expand_bytes, &expand_ms)) {
4004               // We failed to expand the heap. Cannot do anything about it.
4005             }
4006             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
4007           }
4008         }
4009 
4010         // We redo the verification but now wrt the new CSet which
4011         // has just been initialized after the previous CSet was freed.
4012         _cm->verify_no_cset_oops();
4013         _cm->note_end_of_gc();
4014 
4015         // This timing is only used by the ergonomics to handle our pause target.
4016         // It is unclear why this should not include the full pause. We will
4017         // investigate this in CR 7178365.
4018         double sample_end_time_sec = os::elapsedTime();
4019         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4020         size_t total_cards_scanned = per_thread_states.total_cards_scanned();


4040         // stamp here we invalidate all the GC time stamps on all the
4041         // regions and saved_mark_word() will simply return top() for
4042         // all the regions. This is a nicer way of ensuring this rather
4043         // than iterating over the regions and fixing them. In fact, the
4044         // GC time stamp increment here also ensures that
4045         // saved_mark_word() will return top() between pauses, i.e.,
4046         // during concurrent refinement. So we don't need the
4047         // is_gc_active() check to decide which top to use when
4048         // scanning cards (see CR 7039627).
4049         increment_gc_time_stamp();
4050 
4051         verify_after_gc();
4052         check_bitmaps("GC End");
4053 
4054         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4055         ref_processor_stw()->verify_no_references_recorded();
4056 
4057         // CM reference discovery will be re-enabled if necessary.
4058       }
4059 
4060       // We should do this after we potentially expand the heap so
4061       // that all the COMMIT events are generated before the end GC
4062       // event, and after we retire the GC alloc regions so that all
4063       // RETIRE events are generated before the end GC event.
4064       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4065 
4066 #ifdef TRACESPINNING
4067       ParallelTaskTerminator::print_termination_counts();
4068 #endif
4069 
4070       gc_epilogue(false);
4071     }
4072 
4073     // Print the remainder of the GC log output.
4074     log_gc_footer(os::elapsedTime() - pause_start_sec);
4075 
4076     // It is not yet safe to tell the concurrent mark to
4077     // start as we have some optional output below. We don't want the
4078     // output from the concurrent mark thread interfering with this
4079     // logging output either.
4080 
4081     _hrm.verify_optional();
4082     verify_region_sets_optional();
4083 
4084     TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4085     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4086 
4087     print_heap_after_gc();
4088     trace_heap_after_gc(_gc_tracer_stw);
4089 
4090     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4091     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4092     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4093     // before any GC notifications are raised.
4094     g1mm()->update_sizes();
4095 
4096     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4097     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4098     _gc_timer_stw->register_gc_end();
4099     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4100   }
4101   // It should now be safe to tell the concurrent mark thread to start
4102   // without its logging output interfering with the logging output
4103   // that came from the pause.
4104 


4255 
4256       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4257 
4258       double term_sec = 0.0;
4259       size_t evac_term_attempts = 0;
4260       {
4261         double start = os::elapsedTime();
4262         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4263         evac.do_void();
4264 
4265         evac_term_attempts = evac.term_attempts();
4266         term_sec = evac.term_time();
4267         double elapsed_sec = os::elapsedTime() - start;
4268         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4269         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4270         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4271       }
4272 
4273       assert(pss->queue_is_empty(), "should be empty");
4274 
4275       if (PrintTerminationStats) {
4276         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4277         size_t lab_waste;
4278         size_t lab_undo_waste;
4279         pss->waste(lab_waste, lab_undo_waste);
4280         _g1h->print_termination_stats(gclog_or_tty,
4281                                       worker_id,
4282                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
4283                                       strong_roots_sec * 1000.0,                  /* strong roots time */
4284                                       term_sec * 1000.0,                          /* evac term time */
4285                                       evac_term_attempts,                         /* evac term attempts */
4286                                       lab_waste,                                  /* alloc buffer waste */
4287                                       lab_undo_waste                              /* undo waste */
4288                                       );
4289       }
4290 
4291       // Close the inner scope so that the ResourceMark and HandleMark
4292       // destructors are executed here and are included as part of the
4293       // "GC Worker Time".
4294     }
4295     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4296   }
4297 };
4298 
4299 void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
4300   st->print_raw_cr("GC Termination Stats");
4301   st->print_raw_cr("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
4302   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts  total   alloc    undo");
4303   st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");




4304 }
4305 
4306 void G1CollectedHeap::print_termination_stats(outputStream* const st,
4307                                               uint worker_id,
4308                                               double elapsed_ms,
4309                                               double strong_roots_ms,
4310                                               double term_ms,
4311                                               size_t term_attempts,
4312                                               size_t alloc_buffer_waste,
4313                                               size_t undo_waste) const {
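       // One row of the termination-stats table: times in ms with percentages
       // of the worker's elapsed time; waste counters arrive in words and are
       // converted to KiB below to match the table header.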
4314   st->print_cr("%3u %9.2f %9.2f %6.2f "

4315                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4316                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4317                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4318                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4319                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4320                alloc_buffer_waste * HeapWordSize / K,
4321                undo_waste * HeapWordSize / K);
4322 }
4323 
4324 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4325 private:
4326   BoolObjectClosure* _is_alive;
4327   int _initial_string_table_size;
4328   int _initial_symbol_table_size;
4329 
4330   bool  _process_strings;
4331   int _strings_processed;
4332   int _strings_removed;
4333 
4334   bool  _process_symbols;


4343     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4344 
4345     _initial_string_table_size = StringTable::the_table()->table_size();
4346     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4347     if (process_strings) {
4348       StringTable::clear_parallel_claimed_index();
4349     }
4350     if (process_symbols) {
4351       SymbolTable::clear_parallel_claimed_index();
4352     }
4353   }
4354 
4355   ~G1StringSymbolTableUnlinkTask() {
4356     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4357               "claim value %d after unlink less than initial string table size %d",
4358               StringTable::parallel_claimed_index(), _initial_string_table_size);
4359     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4360               "claim value %d after unlink less than initial symbol table size %d",
4361               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4362 
4363     if (G1TraceStringSymbolTableScrubbing) {
4364       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4365                              "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4366                              "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4367                              strings_processed(), strings_removed(),
4368                              symbols_processed(), symbols_removed());
4369     }
4370   }
4371 
4372   void work(uint worker_id) {
4373     int strings_processed = 0;
4374     int strings_removed = 0;
4375     int symbols_processed = 0;
4376     int symbols_removed = 0;
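         // Workers claim disjoint chunks of the tables (via the parallel claimed
         // index checked in the destructor above) and publish their local counts
         // with atomic adds so the totals stay consistent.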
4377     if (_process_strings) {
4378       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4379       Atomic::add(strings_processed, &_strings_processed);
4380       Atomic::add(strings_removed, &_strings_removed);
4381     }
4382     if (_process_symbols) {
4383       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4384       Atomic::add(symbols_processed, &_symbols_processed);
4385       Atomic::add(symbols_removed, &_symbols_removed);
4386     }
4387   }
4388 
4389   size_t strings_processed() const { return (size_t)_strings_processed; }
4390   size_t strings_removed()   const { return (size_t)_strings_removed; }


5177 
5178 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5179   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5180 
5181   // Should G1EvacuationFailureALot be in effect for this GC?
5182   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5183 
5184   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5185   double start_par_time_sec = os::elapsedTime();
5186   double end_par_time_sec;
5187 
5188   {
5189     const uint n_workers = workers()->active_workers();
5190     G1RootProcessor root_processor(this, n_workers);
5191     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5192     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5193     if (collector_state()->during_initial_mark_pause()) {
5194       ClassLoaderDataGraph::clear_claimed_marks();
5195     }
5196 
5197     // The individual threads will set their evac-failure closures.
5198     if (PrintTerminationStats) {
5199       print_termination_stats_hdr(gclog_or_tty);
5200     }
5201 
5202     workers()->run_task(&g1_par_task);
5203     end_par_time_sec = os::elapsedTime();
5204 
5205     // Closing the inner scope will execute the destructor
5206     // for the G1RootProcessor object. We record the current
5207     // elapsed time before closing the scope so that time
5208     // taken for the destructor is NOT included in the
5209     // reported parallel time.
5210   }
5211 
5212   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5213 
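       // Time up to the end of the parallel phase is reported as parallel time;
       // everything between then and now (essentially the G1RootProcessor
       // destructor, see the comment above) is reported as code root fixup.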
5214   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5215   phase_times->record_par_time(par_time_ms);
5216 
5217   double code_root_fixup_time_ms =
5218         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5219   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5220 


5419   }
5420 }
5421 
5422 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5423   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5424   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5425     verify_dirty_region(hr);
5426   }
5427 }
5428 
5429 void G1CollectedHeap::verify_dirty_young_regions() {
5430   verify_dirty_young_list(_young_list->first_region());
5431 }
5432 
5433 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5434                                                HeapWord* tams, HeapWord* end) {
5435   guarantee(tams <= end,
5436             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
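       // Objects above TAMS are considered implicitly live, so the bitmap must
       // not contain any mark bits in [tams, end).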
5437   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5438   if (result < end) {
5439     gclog_or_tty->cr();
5440     gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5441                            bitmap_name, p2i(result));
5442     gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5443                            bitmap_name, p2i(tams), p2i(end));
5444     return false;
5445   }
5446   return true;
5447 }
5448 
5449 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5450   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5451   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5452 
5453   HeapWord* bottom = hr->bottom();
5454   HeapWord* ptams  = hr->prev_top_at_mark_start();
5455   HeapWord* ntams  = hr->next_top_at_mark_start();
5456   HeapWord* end    = hr->end();
5457 
5458   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5459 
5460   bool res_n = true;
5461   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5462   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5463   // if we happen to be in that state.
5464   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5465     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5466   }
5467   if (!res_p || !res_n) {
5468     gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
5469                            HR_FORMAT_PARAMS(hr));
5470     gclog_or_tty->print_cr("#### Caller: %s", caller);
5471     return false;
5472   }
5473   return true;
5474 }
5475 
5476 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5477   if (!G1VerifyBitmaps) return;
5478 
5479   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5480 }
5481 
5482 class G1VerifyBitmapClosure : public HeapRegionClosure {
5483 private:
5484   const char* _caller;
5485   G1CollectedHeap* _g1h;
5486   bool _failures;
5487 
5488 public:
5489   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5490     _caller(caller), _g1h(g1h), _failures(false) { }


5504 
5505 void G1CollectedHeap::check_bitmaps(const char* caller) {
5506   if (!G1VerifyBitmaps) return;
5507 
5508   G1VerifyBitmapClosure cl(caller, this);
5509   heap_region_iterate(&cl);
5510   guarantee(!cl.failures(), "bitmap verification");
5511 }
5512 
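     // Cross-checks the in-cset fast-test table against each region's actual
     // state (humongous, young, old, in the collection set).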
5513 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5514  private:
5515   bool _failures;
5516  public:
5517   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5518 
5519   virtual bool doHeapRegion(HeapRegion* hr) {
5520     uint i = hr->hrm_index();
5521     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5522     if (hr->is_humongous()) {
5523       if (hr->in_collection_set()) {
5524         gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
5525         _failures = true;
5526         return true;
5527       }
5528       if (cset_state.is_in_cset()) {
5529         gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5530         _failures = true;
5531         return true;
5532       }
5533       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5534         gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5535         _failures = true;
5536         return true;
5537       }
5538     } else {
5539       if (cset_state.is_humongous()) {
5540         gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5541         _failures = true;
5542         return true;
5543       }
5544       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5545         gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
5546                                hr->in_collection_set(), cset_state.value(), i);
5547         _failures = true;
5548         return true;
5549       }
5550       if (cset_state.is_in_cset()) {
5551         if (hr->is_young() != (cset_state.is_young())) {
5552           gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
5553                                  hr->is_young(), cset_state.value(), i);
5554           _failures = true;
5555           return true;
5556         }
5557         if (hr->is_old() != (cset_state.is_old())) {
5558           gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
5559                                  hr->is_old(), cset_state.value(), i);
5560           _failures = true;
5561           return true;
5562         }
5563       }
5564     }
5565     return false;
5566   }
5567 
5568   bool failures() const { return _failures; }
5569 };
5570 
5571 bool G1CollectedHeap::check_cset_fast_test() {
5572   G1CheckCSetFastTableClosure cl;
5573   _hrm.iterate(&cl);
5574   return !cl.failures();
5575 }
5576 #endif // PRODUCT
5577 
5578 void G1CollectedHeap::cleanUpCardTable() {


5749     // until the end of a concurrent mark.
5750     //
5751     // It is not required to check whether the object has been found dead by marking
5752     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5753     // all objects allocated during that time are considered live.
5754     // SATB marking is even more conservative than the remembered set.
5755     // So if at this point in the collection there is no remembered set entry,
5756     // nobody has a reference to it.
5757     // At the start of collection we flush all refinement logs, and remembered sets
5758     // are completely up-to-date wrt references to the humongous object.
5759     //
5760     // Other implementation considerations:
5761     // - never consider object arrays at this time because they would pose
5762     // considerable effort for cleaning up the remembered sets. This is
5763     // required because stale remembered sets might reference locations that
5764     // are currently allocated into.
5765     uint region_idx = r->hrm_index();
5766     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5767         !r->rem_set()->is_empty()) {
5768 
5769       if (G1TraceEagerReclaimHumongousObjects) {
5770         gclog_or_tty->print_cr("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5771                                region_idx,
5772                                (size_t)obj->size() * HeapWordSize,
5773                                p2i(r->bottom()),
5774                                r->region_num(),
5775                                r->rem_set()->occupied(),
5776                                r->rem_set()->strong_code_roots_list_length(),
5777                                next_bitmap->isMarked(r->bottom()),
5778                                g1h->is_humongous_reclaim_candidate(region_idx),
5779                                obj->is_typeArray()
5780                               );
5781       }
5782 
5783       return false;
5784     }
5785 
5786     guarantee(obj->is_typeArray(),
5787               "Only eagerly reclaiming type arrays is supported, but the object "
5788               PTR_FORMAT " is not.", p2i(r->bottom()));
5789 
5790     if (G1TraceEagerReclaimHumongousObjects) {
5791       gclog_or_tty->print_cr("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5792                              region_idx,
5793                              (size_t)obj->size() * HeapWordSize,
5794                              p2i(r->bottom()),
5795                              r->region_num(),
5796                              r->rem_set()->occupied(),
5797                              r->rem_set()->strong_code_roots_list_length(),
5798                              next_bitmap->isMarked(r->bottom()),
5799                              g1h->is_humongous_reclaim_candidate(region_idx),
5800                              obj->is_typeArray()
5801                             );
5802     }
5803     // Need to clear mark bit of the humongous object if already set.
5804     if (next_bitmap->isMarked(r->bottom())) {
5805       next_bitmap->clear(r->bottom());
5806     }
5807     _freed_bytes += r->used();
5808     r->set_containing_set(NULL);
5809     _humongous_regions_removed.increment(1u, r->capacity());
5810     g1h->free_humongous_region(r, _free_region_list, false);
5811 
5812     return false;
5813   }
5814 
5815   HeapRegionSetCount& humongous_free_count() {
5816     return _humongous_regions_removed;
5817   }
5818 
5819   size_t bytes_freed() const {
5820     return _freed_bytes;
5821   }
5822 
5823   size_t humongous_reclaimed() const {
5824     return _humongous_regions_removed.length();
5825   }
5826 };
5827 
5828 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5829   assert_at_safepoint(true);
5830 
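       // Fast path out if eager reclaim is disabled, or if this pause found no
       // reclaim candidates and per-region tracing was not requested.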
5831   if (!G1EagerReclaimHumongousObjects ||
5832       (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
5833     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5834     return;
5835   }
5836 
5837   double start_time = os::elapsedTime();
5838 
5839   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5840 
5841   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5842   heap_region_iterate(&cl);
5843 
5844   HeapRegionSetCount empty_set;
5845   remove_from_old_sets(empty_set, cl.humongous_free_count());
5846 
5847   G1HRPrinter* hrp = hr_printer();
5848   if (hrp->is_active()) {
5849     FreeRegionListIterator iter(&local_cleanup_list);
5850     while (iter.more_available()) {
5851       HeapRegion* hr = iter.get_next();
5852       hrp->cleanup(hr);


5865 // the current incremental collection set in preparation for a
5866 // full collection. After the full GC we will start to build up
5867 // the incremental collection set again.
5868 // This is only called when we're doing a full collection
5869 // and is immediately followed by the tearing down of the young list.
5870 
5871 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5872   HeapRegion* cur = cs_head;
5873 
5874   while (cur != NULL) {
5875     HeapRegion* next = cur->next_in_collection_set();
5876     assert(cur->in_collection_set(), "bad CS");
5877     cur->set_next_in_collection_set(NULL);
5878     clear_in_cset(cur);
5879     cur->set_young_index_in_cset(-1);
5880     cur = next;
5881   }
5882 }
5883 
5884 void G1CollectedHeap::set_free_regions_coming() {
5885   if (G1ConcRegionFreeingVerbose) {
5886     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5887                            "setting free regions coming");
5888   }
5889 
5890   assert(!free_regions_coming(), "pre-condition");
5891   _free_regions_coming = true;
5892 }
5893 
5894 void G1CollectedHeap::reset_free_regions_coming() {
5895   assert(free_regions_coming(), "pre-condition");
5896 
5897   {
5898     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5899     _free_regions_coming = false;
5900     SecondaryFreeList_lock->notify_all();
5901   }
5902 
5903   if (G1ConcRegionFreeingVerbose) {
5904     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5905                            "reset free regions coming");
5906   }
5907 }
5908 
5909 void G1CollectedHeap::wait_while_free_regions_coming() {
5910   // Most of the time we won't have to wait, so let's do a quick test
5911   // first before we take the lock.
5912   if (!free_regions_coming()) {
5913     return;
5914   }
5915 
5916   if (G1ConcRegionFreeingVerbose) {
5917     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5918                            "waiting for free regions");
5919   }
5920 
5921   {
5922     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5923     while (free_regions_coming()) {
5924       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5925     }
5926   }
5927 
5928   if (G1ConcRegionFreeingVerbose) {
5929     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5930                            "done waiting for free regions");
5931   }
5932 }
5933 
5934 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5935   return _allocator->is_retained_old_region(hr);
5936 }
5937 
5938 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5939   _young_list->push_region(hr);
5940 }
5941 
5942 class NoYoungRegionsClosure: public HeapRegionClosure {
5943 private:
5944   bool _success;
5945 public:
5946   NoYoungRegionsClosure() : _success(true) { }
5947   bool doHeapRegion(HeapRegion* r) {
5948     if (r->is_young()) {
5949       gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5950                              p2i(r->bottom()), p2i(r->end()));
5951       _success = false;
5952     }
5953     return false;
5954   }
5955   bool success() { return _success; }
5956 };
5957 
5958 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5959   bool ret = _young_list->check_list_empty(check_sample);
5960 
5961   if (check_heap) {
5962     NoYoungRegionsClosure closure;
5963     heap_region_iterate(&closure);
5964     ret = ret && closure.success();
5965   }
5966 
5967   return ret;
5968 }
5969 


6184 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6185                                              size_t allocated_bytes,
6186                                              InCSetState dest) {
6187   bool during_im = collector_state()->during_initial_mark_pause();
6188   alloc_region->note_end_of_copying(during_im);
6189   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6190   if (dest.is_young()) {
6191     young_list()->add_survivor_region(alloc_region);
6192   } else {
6193     _old_set.add(alloc_region);
6194   }
6195   _hr_printer.retire(alloc_region);
6196 }
6197 
6198 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6199   bool expanded = false;
6200   uint index = _hrm.find_highest_free(&expanded);
6201 
6202   if (index != G1_NO_HRM_INDEX) {
6203     if (expanded) {
6204       ergo_verbose1(ErgoHeapSizing,
6205                     "attempt heap expansion",
6206                     ergo_format_reason("requested address range outside heap bounds")
6207                     ergo_format_byte("region size"),
6208                     HeapRegion::GrainWords * HeapWordSize);
6209     }
6210     _hrm.allocate_free_regions_starting_at(index, 1);
6211     return region_at(index);
6212   }
6213   return NULL;
6214 }
6215 
6216 // Heap region set verification
6217 
6218 class VerifyRegionListsClosure : public HeapRegionClosure {
6219 private:
6220   HeapRegionSet*   _old_set;
6221   HeapRegionSet*   _humongous_set;
6222   HeapRegionManager*   _hrm;
6223 
6224 public:
6225   HeapRegionSetCount _old_count;
6226   HeapRegionSetCount _humongous_count;
6227   HeapRegionSetCount _free_count;




  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "gc/g1/bufferingOopClosure.hpp"
  31 #include "gc/g1/concurrentG1Refine.hpp"
  32 #include "gc/g1/concurrentG1RefineThread.hpp"
  33 #include "gc/g1/concurrentMarkThread.inline.hpp"
  34 #include "gc/g1/g1Allocator.inline.hpp"
  35 #include "gc/g1/g1CollectedHeap.inline.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"

  38 #include "gc/g1/g1EvacFailure.hpp"
  39 #include "gc/g1/g1GCPhaseTimes.hpp"

  40 #include "gc/g1/g1MarkSweep.hpp"
  41 #include "gc/g1/g1OopClosures.inline.hpp"
  42 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  43 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  44 #include "gc/g1/g1RemSet.inline.hpp"
  45 #include "gc/g1/g1RootClosures.hpp"
  46 #include "gc/g1/g1RootProcessor.hpp"
  47 #include "gc/g1/g1StringDedup.hpp"
  48 #include "gc/g1/g1YCTypes.hpp"
  49 #include "gc/g1/heapRegion.inline.hpp"
  50 #include "gc/g1/heapRegionRemSet.hpp"
  51 #include "gc/g1/heapRegionSet.inline.hpp"
  52 #include "gc/g1/suspendibleThreadSet.hpp"
  53 #include "gc/g1/vm_operations_g1.hpp"
  54 #include "gc/shared/gcHeapSummary.hpp"
  55 #include "gc/shared/gcId.hpp"
  56 #include "gc/shared/gcLocker.inline.hpp"
  57 #include "gc/shared/gcTimer.hpp"
  58 #include "gc/shared/gcTrace.hpp"
  59 #include "gc/shared/gcTraceTime.hpp"
  60 #include "gc/shared/generationSpec.hpp"
  61 #include "gc/shared/isGCActiveMark.hpp"
  62 #include "gc/shared/referenceProcessor.hpp"
  63 #include "gc/shared/taskqueue.inline.hpp"
  64 #include "logging/log.hpp"
  65 #include "memory/allocation.hpp"
  66 #include "memory/iterator.hpp"
  67 #include "oops/oop.inline.hpp"
  68 #include "runtime/atomic.inline.hpp"
  69 #include "runtime/init.hpp"
  70 #include "runtime/orderAccess.inline.hpp"
  71 #include "runtime/vmThread.hpp"
  72 #include "utilities/globalDefinitions.hpp"
  73 #include "utilities/stack.inline.hpp"
  74 
  75 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  76 
  77 // INVARIANTS/NOTES
  78 //
  79 // All allocation activity covered by the G1CollectedHeap interface is
  80 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  81 // and allocate_new_tlab, which are the "entry" points to the
  82 // allocation code from the rest of the JVM.  (Note that this does not
  83 // apply to TLAB allocation, which is not part of this interface: it
  84 // is done by clients of this interface.)


 186   } while (hr != head);
 187   assert(hr != NULL, "invariant");
 188   hr->set_next_dirty_cards_region(NULL);
 189   return hr;
 190 }
 191 
 192 // Returns true if the reference points to an object that
 193 // can move in an incremental collection.
 194 bool G1CollectedHeap::is_scavengable(const void* p) {
 195   HeapRegion* hr = heap_region_containing(p);
 196   return !hr->is_pinned();
 197 }
 198 
 199 // Private methods.
 200 
 201 HeapRegion*
 202 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 203   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 204   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 205     if (!_secondary_free_list.is_empty()) {
 206       log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 207                                 "secondary_free_list has %u entries",
 208                                 _secondary_free_list.length());

 209       // It looks as if there are free regions available on the
 210       // secondary_free_list. Let's move them to the free_list and try
 211       // again to allocate from it.
 212       append_secondary_free_list();
 213 
 214       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 215              "empty we should have moved at least one entry to the free_list");
 216       HeapRegion* res = _hrm.allocate_free_region(is_old);
 217       log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 218                                 "allocated " HR_FORMAT " from secondary_free_list",
 219                                 HR_FORMAT_PARAMS(res));

 220       return res;
 221     }
 222 
 223     // Wait here until we get notified either when (a) there are no
 224     // more free regions coming or (b) some regions have been moved onto
 225     // the secondary_free_list.
 226     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 227   }
 228 
 229   log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 230                             "could not allocate from secondary_free_list");

 231   return NULL;
 232 }
 233 
 234 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 235   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 236          "the only time we use this to allocate a humongous region is "
 237          "when we are allocating a single humongous region");
 238 
 239   HeapRegion* res;
 240   if (G1StressConcRegionFreeing) {
 241     if (!_secondary_free_list.is_empty()) {
 242       log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 243                                 "forced to look at the secondary_free_list");

 244       res = new_region_try_secondary_free_list(is_old);
 245       if (res != NULL) {
 246         return res;
 247       }
 248     }
 249   }
 250 
 251   res = _hrm.allocate_free_region(is_old);
 252 
 253   if (res == NULL) {
 254     log_develop(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 255                               "res == NULL, trying the secondary_free_list");

 256     res = new_region_try_secondary_free_list(is_old);
 257   }
 258   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 259     // Currently, only attempts to allocate GC alloc regions set
 260     // do_expand to true. So, we should only reach here during a
 261     // safepoint. If this assumption changes we might have to
 262     // reconsider the use of _expand_heap_after_alloc_failure.
 263     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 264 
 265     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",



 266                               word_size * HeapWordSize);
 267 
 268     if (expand(word_size * HeapWordSize)) {
 269       // Given that expand() succeeded in expanding the heap, and we
 270       // always expand the heap by an amount aligned to the heap
 271       // region size, the free list should in theory not be empty.
 272       // In either case allocate_free_region() will check for NULL.
 273       res = _hrm.allocate_free_region(is_old);
 274     } else {
 275       _expand_heap_after_alloc_failure = false;
 276     }
 277   }
 278   return res;
 279 }
 280 
 281 HeapWord*
 282 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 283                                                            uint num_regions,
 284                                                            size_t word_size,
 285                                                            AllocationContext_t context) {
 286   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 287   assert(is_humongous(word_size), "word_size should be humongous");


 444     // potentially waits for regions from the secondary free list.
 445     wait_while_free_regions_coming();
 446     append_secondary_free_list_if_not_empty_with_lock();
 447 
 448     // Policy: First try only empty (i.e. already committed) regions. Maybe we
 449     // are lucky enough to find some.
 450     first = _hrm.find_contiguous_only_empty(obj_regions);
 451     if (first != G1_NO_HRM_INDEX) {
 452       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 453     }
 454   }
 455 
 456   if (first == G1_NO_HRM_INDEX) {
 457     // Policy: We could not find enough regions for the humongous object in the
 458     // free list. Look through the heap to find a mix of free and uncommitted regions.
 459     // If so, try expansion.
 460     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 461     if (first != G1_NO_HRM_INDEX) {
 462       // We found something. Make sure these regions are committed, i.e. expand
 463       // the heap. Alternatively we could do a defragmentation GC.
 464       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",



 465                                     word_size * HeapWordSize);
 466 
 467 
 468       _hrm.expand_at(first, obj_regions);
 469       g1_policy()->record_new_heap_size(num_regions());
 470 
 471 #ifdef ASSERT
 472       for (uint i = first; i < first + obj_regions; ++i) {
 473         HeapRegion* hr = region_at(i);
 474         assert(hr->is_free(), "sanity");
 475         assert(hr->is_empty(), "sanity");
 476         assert(is_on_master_free_list(hr), "sanity");
 477       }
 478 #endif
 479       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 480     } else {
 481       // Policy: Potentially trigger a defragmentation GC.
 482     }
 483   }
 484 
 485   HeapWord* result = NULL;
 486   if (first != G1_NO_HRM_INDEX) {
 487     result = humongous_obj_allocate_initialize_regions(first, obj_regions,


 765     HeapRegion* start_region = _hrm.addr_to_region(start_address);
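         // If this range starts in the region where the previous range ended,
         // that region is already allocated: either the whole range fits there
         // (just raise top and move to the next range) or we fill the shared
         // region to its end and allocate new regions only for the remainder.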
 766     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 767       start_address = start_region->end();
 768       if (start_address > last_address) {
 769         increase_used(word_size * HeapWordSize);
 770         start_region->set_top(last_address + 1);
 771         continue;
 772       }
 773       start_region->set_top(start_address);
 774       curr_range = MemRegion(start_address, last_address + 1);
 775       start_region = _hrm.addr_to_region(start_address);
 776     }
 777 
 778     // Perform the actual region allocation, exiting if it fails.
 779     // Then note how much new space we have allocated.
 780     if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
 781       return false;
 782     }
 783     increase_used(word_size * HeapWordSize);
 784     if (commits != 0) {
 785       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",



 786                                 HeapRegion::GrainWords * HeapWordSize * commits);
 787 
 788     }
 789 
 790     // Mark each G1 region touched by the range as archive, add it to the old set,
 791     // and set the allocation context and top.
 792     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 793     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 794     prev_last_region = last_region;
 795 
 796     while (curr_region != NULL) {
 797       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 798              "Region already in use (index %u)", curr_region->hrm_index());
 799       _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
 800       curr_region->set_allocation_context(AllocationContext::system());
 801       curr_region->set_archive();
 802       _old_set.add(curr_region);
 803       if (curr_region != last_region) {
 804         curr_region->set_top(curr_region->end());
 805         curr_region = _hrm.next_region_in_heap(curr_region);
 806       } else {
 807         curr_region->set_top(last_address + 1);


 948       guarantee(curr_region->is_archive(),
 949                 "Expected archive region at index %u", curr_region->hrm_index());
 950       uint curr_index = curr_region->hrm_index();
 951       _old_set.remove(curr_region);
 952       curr_region->set_free();
 953       curr_region->set_top(curr_region->bottom());
 954       if (curr_region != last_region) {
 955         curr_region = _hrm.next_region_in_heap(curr_region);
 956       } else {
 957         curr_region = NULL;
 958       }
 959       _hrm.shrink_at(curr_index, 1);
 960       uncommitted_regions++;
 961     }
 962 
 963     // Notify mark-sweep that this is no longer an archive range.
 964     G1MarkSweep::set_range_archive(ranges[i], false);
 965   }
 966 
 967   if (uncommitted_regions != 0) {
 968     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",



 969                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 970   }
 971   decrease_used(size_used);
 972 }
 973 
 974 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 975                                                         uint* gc_count_before_ret,
 976                                                         uint* gclocker_retry_count_ret) {
 977   // The structure of this method has a lot of similarities to
 978   // attempt_allocation_slow(). The reason these two were not merged
 979   // into a single one is that such a method would require several "if
 980   // allocation is not humongous do this, otherwise do that"
 981   // conditional paths which would obscure its flow. In fact, an early
 982   // version of this code did use a unified method which was harder to
 983   // follow and, as a result, it had subtle bugs that were hard to
 984   // track down. So keeping these two methods separate allows each to
 985   // be more readable. It will be good to keep these two in sync as
 986   // much as possible.
 987 
 988   assert_heap_not_locked_and_not_at_safepoint();


1191       } else {
1192         _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1193       }
1194     } else if (hr->is_continues_humongous()) {
1195       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1196     } else if (hr->is_archive()) {
1197       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1198     } else if (hr->is_old()) {
1199       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1200     } else {
1201       ShouldNotReachHere();
1202     }
1203     return false;
1204   }
1205 
1206   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1207     : _hr_printer(hr_printer) { }
1208 };
1209 
1210 void G1CollectedHeap::print_hrm_post_compaction() {
1211   if (_hr_printer.is_active()) {
1212     PostCompactionPrinterClosure cl(hr_printer());
1213     heap_region_iterate(&cl);
1214   }
1215 
1216 }
1217 
1218 bool G1CollectedHeap::do_collection(bool explicit_gc,
1219                                     bool clear_all_soft_refs,
1220                                     size_t word_size) {
1221   assert_at_safepoint(true /* should_be_vm_thread */);
1222 
1223   if (GC_locker::check_active_before_gc()) {
1224     return false;
1225   }
1226 
1227   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1228   gc_timer->register_gc_start();
1229 
1230   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1231   GCIdMark gc_id_mark;
1232   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1233 
1234   SvcGCMarker sgcm(SvcGCMarker::FULL);
1235   ResourceMark rm;
1236 

1237   print_heap_before_gc();
1238   trace_heap_before_gc(gc_tracer);
1239 
1240   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1241 
1242   verify_region_sets_optional();
1243 
1244   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1245                            collector_policy()->should_clear_all_soft_refs();
1246 
1247   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1248 
1249   {
1250     IsGCActiveMark x;
1251 
1252     // Timing
1253     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1254     GCTraceCPUTime tcpu;
1255 
1256     {
1257       GCTraceTime(Info, gc) tm("Full GC", NULL, gc_cause(), true);
1258       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1259       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1260 
1261       g1_policy()->record_full_collection_start();
1262 
1263       // Note: When we have a more flexible GC logging framework that
1264       // allows us to add optional attributes to a GC log record we
1265       // could consider timing and reporting how long we wait in the
1266       // following two methods.
1267       wait_while_free_regions_coming();
1268       // If we start the compaction before the CM threads finish
1269       // scanning the root regions we might trip them over as we'll
1270       // be moving objects / updating references. So let's wait until
1271       // they are done. By telling them to abort, they should complete
1272       // early.
1273       _cm->root_regions()->abort();
1274       _cm->root_regions()->wait_until_scan_finished();
1275       append_secondary_free_list_if_not_empty_with_lock();
1276 
1277       gc_prologue(true);


1288 #if defined(COMPILER2) || INCLUDE_JVMCI
1289       DerivedPointerTable::clear();
1290 #endif
1291 
1292       // Disable discovery and empty the discovered lists
1293       // for the CM ref processor.
1294       ref_processor_cm()->disable_discovery();
1295       ref_processor_cm()->abandon_partial_discovery();
1296       ref_processor_cm()->verify_no_references_recorded();
1297 
1298       // Abandon current iterations of concurrent marking and concurrent
1299       // refinement, if any are in progress. We have to do this before
1300       // wait_until_scan_finished() below.
1301       concurrent_mark()->abort();
1302 
1303       // Make sure we'll choose a new allocation region afterwards.
1304       _allocator->release_mutator_alloc_region();
1305       _allocator->abandon_gc_alloc_regions();
1306       g1_rem_set()->cleanupHRRS();
1307 





1308       // We may have added regions to the current incremental collection
1309       // set between the last GC or pause and now. We need to clear the
1310       // incremental collection set and then start rebuilding it afresh
1311       // after this full GC.
1312       abandon_collection_set(g1_policy()->inc_cset_head());
1313       g1_policy()->clear_incremental_cset();
1314       g1_policy()->stop_incremental_cset_building();
1315 
1316       tear_down_region_sets(false /* free_list_only */);
1317       collector_state()->set_gcs_are_young(true);
1318 
1319       // See the comments in g1CollectedHeap.hpp and
1320       // G1CollectedHeap::ref_processing_init() about
1321       // how reference processing currently works in G1.
1322 
1323       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1324       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1325 
1326       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1327       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);


1355       ClassLoaderDataGraph::purge();
1356       MetaspaceAux::verify_metrics();
1357 
1358       // Note: since we've just done a full GC, concurrent
1359       // marking is no longer active. Therefore we need not
1360       // re-enable reference discovery for the CM ref processor.
1361       // That will be done at the start of the next marking cycle.
1362       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1363       ref_processor_cm()->verify_no_references_recorded();
1364 
1365       reset_gc_time_stamp();
1366       // Since everything potentially moved, we will clear all remembered
1367       // sets, and clear all cards.  Later we will rebuild remembered
1368       // sets. We will also reset the GC time stamps of the regions.
1369       clear_rsets_post_compaction();
1370       check_gc_time_stamps();
1371 
1372       // Resize the heap if necessary.
1373       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1374 

1375       // We should do this after we potentially resize the heap so
1376       // that all the COMMIT / UNCOMMIT events are generated before
1377       // the compaction events.

1378       print_hrm_post_compaction();


1379 
1380       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1381       if (hot_card_cache->use_cache()) {
1382         hot_card_cache->reset_card_counts();
1383         hot_card_cache->reset_hot_cache();
1384       }
1385 
1386       // Rebuild remembered sets of all regions.
1387       uint n_workers =
1388         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1389                                                 workers()->active_workers(),
1390                                                 Threads::number_of_non_daemon_threads());
1391       workers()->set_active_workers(n_workers);
1392 
1393       ParRebuildRSTask rebuild_rs_task(this);
1394       workers()->run_task(&rebuild_rs_task);
1395 
1396       // Rebuild the strong code root lists for each region
1397       rebuild_strong_code_roots();
1398 


1427       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1428       // objects marked during a full GC against the previous bitmap.
1429       // But we need to clear it before calling check_bitmaps below since
1430       // the full GC has compacted objects and updated TAMS but not updated
1431       // the prev bitmap.
1432       if (G1VerifyBitmaps) {
1433         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1434       }
1435       check_bitmaps("Full GC End");
1436 
1437       // Start a new incremental collection set for the next pause
1438       assert(g1_policy()->collection_set() == NULL, "must be");
1439       g1_policy()->start_incremental_cset_building();
1440 
1441       clear_cset_fast_test();
1442 
1443       _allocator->init_mutator_alloc_region();
1444 
1445       g1_policy()->record_full_collection_end();
1446 




1447       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1448       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1449       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1450       // before any GC notifications are raised.
1451       g1mm()->update_sizes();
1452 
1453       gc_epilogue(true);
1454     }
1455 
1456     g1_policy()->print_detailed_heap_transition();


1457 
1458     print_heap_after_gc();
1459     trace_heap_after_gc(gc_tracer);
1460 
1461     post_full_gc_dump(gc_timer);
1462 
1463     gc_timer->register_gc_end();
1464     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1465   }
1466 
1467   return true;
1468 }
1469 
1470 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1471   // do_collection() will return whether it succeeded in performing
1472   // the GC. Currently, there is no facility on the
1473   // do_full_collection() API to notify the caller that the collection
1474   // did not succeed (e.g., because it was locked out by the GC
1475   // locker). So, right now, we'll ignore the return value.
1476   bool dummy = do_collection(true,                /* explicit_gc */


1521 
1522   // This assert only makes sense here, before we adjust them
1523   // with respect to the min and max heap size.
1524   assert(minimum_desired_capacity <= maximum_desired_capacity,
1525          "minimum_desired_capacity = " SIZE_FORMAT ", "
1526          "maximum_desired_capacity = " SIZE_FORMAT,
1527          minimum_desired_capacity, maximum_desired_capacity);
1528 
1529   // Should not be greater than the heap max size. No need to adjust
1530   // it with respect to the heap min size as it's a lower bound (i.e.,
1531   // we'll try to make the capacity larger than it, not smaller).
1532   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1533   // Should not be less than the heap min size. No need to adjust it
1534   // with respect to the heap max size as it's an upper bound (i.e.,
1535   // we'll try to make the capacity smaller than it, not greater).
1536   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1537 
1538   if (capacity_after_gc < minimum_desired_capacity) {
1539     // Don't expand unless it's significant
1540     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1541 
1542     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1543                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1544                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1545 




1546     expand(expand_bytes);
1547 
1548     // No expansion, now see if we want to shrink
1549   } else if (capacity_after_gc > maximum_desired_capacity) {
1550     // Capacity too large, compute shrinking size
1551     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1552 
1553     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1554                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1555                               capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1556 




1557     shrink(shrink_bytes);
1558   }
1559 }
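// Editor's note: a purely illustrative worked example of the resize decision
// above (the figures are invented, not taken from this change). With
// capacity_after_gc = 1000 MB, minimum_desired_capacity = 1200 MB and
// maximum_desired_capacity = 4000 MB, the first branch expands the heap by
// 1200 MB - 1000 MB = 200 MB. With capacity_after_gc = 5000 MB and the same
// bounds, the second branch shrinks it by 5000 MB - 4000 MB = 1000 MB. A
// capacity that already lies between the two bounds leaves the heap size
// unchanged.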
1560 
1561 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1562                                                             AllocationContext_t context,
1563                                                             bool do_gc,
1564                                                             bool clear_all_soft_refs,
1565                                                             bool expect_null_mutator_alloc_region,
1566                                                             bool* gc_succeeded) {
1567   *gc_succeeded = true;
1568   // Let's attempt the allocation first.
1569   HeapWord* result =
1570     attempt_allocation_at_safepoint(word_size,
1571                                     context,
1572                                     expect_null_mutator_alloc_region);
1573   if (result != NULL) {
1574     assert(*gc_succeeded, "sanity");
1575     return result;
1576   }


1643 
1644   // What else?  We might try synchronous finalization later.  If the total
1645   // space available is large enough for the allocation, then a more
1646   // complete compaction phase than we've tried so far might be
1647   // appropriate.
1648   assert(*succeeded, "sanity");
1649   return NULL;
1650 }
1651 
1652 // Attempt to expand the heap sufficiently
1653 // to support an allocation of the given "word_size".  If
1654 // successful, perform the allocation and return the address of the
1655 // allocated block; otherwise return "NULL".
1656 
1657 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1658   assert_at_safepoint(true /* should_be_vm_thread */);
1659 
1660   verify_region_sets_optional();
1661 
1662   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1663   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",



1664                             word_size * HeapWordSize);
1665 
1666 
1667   if (expand(expand_bytes)) {
1668     _hrm.verify_optional();
1669     verify_region_sets_optional();
1670     return attempt_allocation_at_safepoint(word_size,
1671                                            context,
1672                                            false /* expect_null_mutator_alloc_region */);
1673   }
1674   return NULL;
1675 }
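// Editor's note: an illustrative sketch of the sizing above, assuming an
// 8-byte HeapWordSize (the real values come from the running VM, and
// MinHeapDeltaBytes is a tunable). A failed allocation request of
// 131072 words corresponds to 131072 * 8 = 1 MB, so
// expand_bytes = MAX2(1 MB, MinHeapDeltaBytes); whichever value is larger
// drives the expansion attempt before the allocation is retried at the
// safepoint.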
1676 
1677 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1678   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1679   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1680                                        HeapRegion::GrainBytes);
1681 
1682   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",


1683                             expand_bytes, aligned_expand_bytes);
1684 
1685   if (is_maximal_no_gc()) {
1686     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");


1687     return false;
1688   }
1689 
1690   double expand_heap_start_time_sec = os::elapsedTime();
1691   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1692   assert(regions_to_expand > 0, "Must expand by at least one region");
1693 
1694   uint expanded_by = _hrm.expand_by(regions_to_expand);
1695   if (expand_time_ms != NULL) {
1696     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1697   }
1698 
1699   if (expanded_by > 0) {
1700     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1701     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1702     g1_policy()->record_new_heap_size(num_regions());
1703   } else {
1704     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1705 

1706     // The expansion of the virtual storage space was unsuccessful.
1707     // Let's see if it was because we ran out of swap.
1708     if (G1ExitOnExpansionFailure &&
1709         _hrm.available() >= regions_to_expand) {
1710       // We had head room...
1711       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1712     }
1713   }
1714   return regions_to_expand > 0;
1715 }
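// Editor's note: an illustrative example of the rounding above, assuming a
// hypothetical 4 MB region size (HeapRegion::GrainBytes is chosen
// ergonomically at startup). A request of expand_bytes = 10 MB is first
// page-aligned and then rounded up to 12 MB, so regions_to_expand becomes
// 12 MB / 4 MB = 3 and _hrm.expand_by(3) is asked to commit three regions;
// expanded_by reports how many of them were actually committed.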
1716 
1717 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1718   size_t aligned_shrink_bytes =
1719     ReservedSpace::page_align_size_down(shrink_bytes);
1720   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1721                                          HeapRegion::GrainBytes);
1722   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1723 
1724   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1725   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1726 
1727 
1728   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",



1729                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1730   if (num_regions_removed > 0) {
1731     g1_policy()->record_new_heap_size(num_regions());
1732   } else {
1733     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");


1734   }
1735 }
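// Editor's note: the shrink path mirrors the expansion example above but
// rounds down instead of up (again assuming a hypothetical 4 MB region
// size). A request of shrink_bytes = 10 MB is page-aligned down and then
// region-aligned down to 8 MB, while num_regions_to_remove is computed from
// the unaligned request as 10 MB / 4 MB = 2; _hrm.shrink_by(2) then reports
// how many regions were actually uncommitted.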
1736 
1737 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1738   verify_region_sets_optional();
1739 
1740   // We should only reach here at the end of a Full GC which means we
1741   // should not be holding on to any GC alloc regions. The method
1742   // below will make sure of that and do any remaining clean up.
1743   _allocator->abandon_gc_alloc_regions();
1744 
1745   // Instead of tearing down / rebuilding the free lists here, we
1746   // could instead use the remove_all_pending() method on free_list to
1747   // remove only the ones that we need to remove.
1748   tear_down_region_sets(true /* free_list_only */);
1749   shrink_helper(shrink_bytes);
1750   rebuild_region_sets(true /* free_list_only */);
1751 
1752   _hrm.verify_optional();
1753   verify_region_sets_optional();


1826   // Initialize the G1EvacuationFailureALot counters and flags.
1827   NOT_PRODUCT(reset_evacuation_should_fail();)
1828 
1829   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1830 }
1831 
1832 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1833                                                                  size_t size,
1834                                                                  size_t translation_factor) {
1835   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1836   // Allocate a new reserved space, preferring to use large pages.
1837   ReservedSpace rs(size, preferred_page_size);
1838   G1RegionToSpaceMapper* result  =
1839     G1RegionToSpaceMapper::create_mapper(rs,
1840                                          size,
1841                                          rs.alignment(),
1842                                          HeapRegion::GrainBytes,
1843                                          translation_factor,
1844                                          mtGC);
1845   if (TracePageSizes) {
1846     tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1847                   description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1848   }
1849   return result;
1850 }
1851 
1852 jint G1CollectedHeap::initialize() {
1853   CollectedHeap::pre_initialize();
1854   os::enable_vtime();
1855 


1856   // Necessary to satisfy locking discipline assertions.
1857 
1858   MutexLocker x(Heap_lock);
1859 




1860   // While there are no constraints in the GC code that HeapWordSize
1861   // be any particular value, there are multiple other areas in the
1862   // system which believe this to be true (e.g. oop->object_size in some
1863   // cases incorrectly returns the size in wordSize units rather than
1864   // HeapWordSize).
1865   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1866 
1867   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1868   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1869   size_t heap_alignment = collector_policy()->heap_alignment();
1870 
1871   // Ensure that the sizes are properly aligned.
1872   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1873   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1874   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1875 
1876   _refine_cte_cl = new RefineCardTableEntryClosure();
1877 
1878   jint ecode = JNI_OK;
1879   _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);


2042   G1AllocRegion::setup(this, dummy_region);
2043 
2044   _allocator->init_mutator_alloc_region();
2045 
2046   // Create the monitoring and management support so that
2047   // values in the heap have been properly initialized.
2048   _g1mm = new G1MonitoringSupport(this);
2049 
2050   G1StringDedup::initialize();
2051 
2052   _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2053   for (uint i = 0; i < ParallelGCThreads; i++) {
2054     new (&_preserved_objs[i]) OopAndMarkOopStack();
2055   }
2056 
2057   return JNI_OK;
2058 }
2059 
2060 void G1CollectedHeap::stop() {
2061   // Stop all concurrent threads. We do this to make sure these threads
2062   // do not continue to execute and access resources (e.g. logging)
2063   // that are destroyed during shutdown.
2064   _cg1r->stop();
2065   _cmThread->stop();
2066   if (G1StringDedup::is_enabled()) {
2067     G1StringDedup::stop();
2068   }
2069 }
2070 
2071 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2072   return HeapRegion::max_region_size();
2073 }
2074 
2075 void G1CollectedHeap::post_initialize() {
2076   CollectedHeap::post_initialize();
2077   ref_processing_init();
2078 }
2079 
2080 void G1CollectedHeap::ref_processing_init() {
2081   // Reference processing in G1 currently works as follows:
2082   //


2169       assert(chr->is_continues_humongous(), "sanity");
2170       chr->reset_gc_time_stamp();
2171     }
2172   }
2173 }
2174 
2175 #ifndef PRODUCT
2176 
2177 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2178 private:
2179   unsigned _gc_time_stamp;
2180   bool _failures;
2181 
2182 public:
2183   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2184     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2185 
2186   virtual bool doHeapRegion(HeapRegion* hr) {
2187     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2188     if (_gc_time_stamp != region_gc_time_stamp) {
2189       log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %u, expected %u", HR_FORMAT_PARAMS(hr),

2190                            region_gc_time_stamp, _gc_time_stamp);
2191       _failures = true;
2192     }
2193     return false;
2194   }
2195 
2196   bool failures() { return _failures; }
2197 };
2198 
2199 void G1CollectedHeap::check_gc_time_stamps() {
2200   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2201   heap_region_iterate(&cl);
2202   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2203 }
2204 #endif // PRODUCT
2205 
2206 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2207   _cg1r->hot_card_cache()->drain(cl, worker_i);
2208 }
2209 


2755 private:
2756   G1CollectedHeap* _g1h;
2757   VerifyOption     _vo;
2758   bool             _failures;
2759 public:
2760   // _vo == UsePrevMarking -> use "prev" marking information,
2761   // _vo == UseNextMarking -> use "next" marking information,
2762   // _vo == UseMarkWord    -> use mark word from object header.
2763   VerifyRootsClosure(VerifyOption vo) :
2764     _g1h(G1CollectedHeap::heap()),
2765     _vo(vo),
2766     _failures(false) { }
2767 
2768   bool failures() { return _failures; }
2769 
2770   template <class T> void do_oop_nv(T* p) {
2771     T heap_oop = oopDesc::load_heap_oop(p);
2772     if (!oopDesc::is_null(heap_oop)) {
2773       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2774       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2775         LogHandle(gc, verify) log;
2776         log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2777         if (_vo == VerifyOption_G1UseMarkWord) {
2778           log.info("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2779         }
2780         ResourceMark rm;
2781         obj->print_on(log.info_stream());
2782         _failures = true;
2783       }
2784     }
2785   }
2786 
2787   void do_oop(oop* p)       { do_oop_nv(p); }
2788   void do_oop(narrowOop* p) { do_oop_nv(p); }
2789 };
2790 
2791 class G1VerifyCodeRootOopClosure: public OopClosure {
2792   G1CollectedHeap* _g1h;
2793   OopClosure* _root_cl;
2794   nmethod* _nm;
2795   VerifyOption _vo;
2796   bool _failures;
2797 
2798   template <class T> void do_oop_work(T* p) {
2799     // First verify that this root is live
2800     _root_cl->do_oop(p);
2801 


2806 
2807     // Don't check the code roots during marking verification in a full GC
2808     if (_vo == VerifyOption_G1UseMarkWord) {
2809       return;
2810     }
2811 
2812     // Now verify that the current nmethod (which contains p) is
2813     // in the code root list of the heap region containing the
2814     // object referenced by p.
2815 
2816     T heap_oop = oopDesc::load_heap_oop(p);
2817     if (!oopDesc::is_null(heap_oop)) {
2818       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2819 
2820       // Now fetch the region containing the object
2821       HeapRegion* hr = _g1h->heap_region_containing(obj);
2822       HeapRegionRemSet* hrrs = hr->rem_set();
2823       // Verify that the strong code root list for this region
2824       // contains the nmethod
2825       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2826         log_info(gc, verify)("Code root location " PTR_FORMAT " "
2827                              "from nmethod " PTR_FORMAT " not in strong "
2828                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2829                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2830         _failures = true;
2831       }
2832     }
2833   }
2834 
2835 public:
2836   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2837     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2838 
2839   void do_oop(oop* p) { do_oop_work(p); }
2840   void do_oop(narrowOop* p) { do_oop_work(p); }
2841 
2842   void set_nmethod(nmethod* nm) { _nm = nm; }
2843   bool failures() { return _failures; }
2844 };
2845 
2846 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {


2987   }
2988 
2989   bool doHeapRegion(HeapRegion* r) {
2990     // For archive regions, verify there are no heap pointers to
2991     // non-pinned regions. For all others, verify liveness info.
2992     if (r->is_archive()) {
2993       VerifyArchiveRegionClosure verify_oop_pointers(r);
2994       r->object_iterate(&verify_oop_pointers);
2995       return true;
2996     }
2997     if (!r->is_continues_humongous()) {
2998       bool failures = false;
2999       r->verify(_vo, &failures);
3000       if (failures) {
3001         _failures = true;
3002       } else {
3003         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3004         r->object_iterate(&not_dead_yet_cl);
3005         if (_vo != VerifyOption_G1UseNextMarking) {
3006           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3007             log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
3008                                    p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());




3009             _failures = true;
3010           }
3011         } else {
3012           // When vo == UseNextMarking we cannot currently do a sanity
3013           // check on the live bytes as the calculation has not been
3014           // finalized yet.
3015         }
3016       }
3017     }
3018     return false; // stop the region iteration if we hit a failure
3019   }
3020 };
3021 
3022 // This is the task used for parallel verification of the heap regions
3023 
3024 class G1ParVerifyTask: public AbstractGangTask {
3025 private:
3026   G1CollectedHeap*  _g1h;
3027   VerifyOption      _vo;
3028   bool              _failures;


3036       AbstractGangTask("Parallel verify task"),
3037       _g1h(g1h),
3038       _vo(vo),
3039       _failures(false),
3040       _hrclaimer(g1h->workers()->active_workers()) {}
3041 
3042   bool failures() {
3043     return _failures;
3044   }
3045 
3046   void work(uint worker_id) {
3047     HandleMark hm;
3048     VerifyRegionClosure blk(true, _vo);
3049     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3050     if (blk.failures()) {
3051       _failures = true;
3052     }
3053   }
3054 };
3055 
3056 void G1CollectedHeap::verify(VerifyOption vo) {
3057   if (!SafepointSynchronize::is_at_safepoint()) {
3058     log_info(gc, verify)("Skipping verification. Not at safepoint.");
         return;
3059   }
3060 
3061   assert(Thread::current()->is_VM_thread(),
3062          "Expected to be executed serially by the VM thread at this point");
3063 
3064   log_debug(gc, verify)("Roots");
3065   VerifyRootsClosure rootsCl(vo);
3066   VerifyKlassClosure klassCl(this, &rootsCl);
3067   CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3068 
3069   // We apply the relevant closures to all the oops in the
3070   // system dictionary, class loader data graph, the string table
3071   // and the nmethods in the code cache.
3072   G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3073   G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3074 
3075   {
3076     G1RootProcessor root_processor(this, 1);
3077     root_processor.process_all_roots(&rootsCl,
3078                                      &cldCl,
3079                                      &blobsCl);
3080   }
3081 
3082   bool failures = rootsCl.failures() || codeRootsCl.failures();
3083 
3084   if (vo != VerifyOption_G1UseMarkWord) {
3085     // If we're verifying during a full GC then the region sets
3086     // will have been torn down at the start of the GC. Therefore
3087     // verifying the region sets will fail. So we only verify
3088     // the region sets when not in a full GC.
3089     log_debug(gc, verify)("HeapRegionSets");
3090     verify_region_sets();
3091   }
3092 
3093   log_debug(gc, verify)("HeapRegions");
3094   if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3095 
3096     G1ParVerifyTask task(this, vo);
3097     workers()->run_task(&task);
3098     if (task.failures()) {
3099       failures = true;
3100     }
3101 
3102   } else {
3103     VerifyRegionClosure blk(false, vo);
3104     heap_region_iterate(&blk);
3105     if (blk.failures()) {
3106       failures = true;
3107     }
3108   }
3109 
3110   if (G1StringDedup::is_enabled()) {
3111     log_debug(gc, verify)("StrDedup");
3112     G1StringDedup::verify();
3113   }
3114 
3115   if (failures) {
3116     log_info(gc, verify)("Heap after failed verification:");
3117     // It helps to have the per-region information in the output to
3118     // help us track down what went wrong. This is why we call
3119     // print_extended_on() instead of print_on().
3120     LogHandle(gc, verify) log;
3121     ResourceMark rm;
3122     print_extended_on(log.info_stream());
3123   }
3124   guarantee(!failures, "there should not have been any failures");













3125 }
3126 
3127 double G1CollectedHeap::verify(bool guard, const char* msg) {
3128   double verify_time_ms = 0.0;
3129 
3130   if (guard && total_collections() >= VerifyGCStartAt) {
3131     double verify_start = os::elapsedTime();
3132     HandleMark hm;  // Discard invalid handles created during verification
3133     prepare_for_verify();
3134     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3135     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3136   }
3137 
3138   return verify_time_ms;
3139 }
3140 
3141 void G1CollectedHeap::verify_before_gc() {
3142   double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
3143   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3144 }
3145 
3146 void G1CollectedHeap::verify_after_gc() {
3147   double verify_time_ms = verify(VerifyAfterGC, "After GC");
3148   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3149 }
3150 
3151 class PrintRegionClosure: public HeapRegionClosure {
3152   outputStream* _st;
3153 public:
3154   PrintRegionClosure(outputStream* st) : _st(st) {}
3155   bool doHeapRegion(HeapRegion* r) {
3156     r->print_on(_st);
3157     return false;
3158   }
3159 };
3160 
3161 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3162                                        const HeapRegion* hr,
3163                                        const VerifyOption vo) const {
3164   switch (vo) {
3165   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3166   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3167   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();


3237     G1StringDedup::print_worker_threads_on(st);
3238   }
3239 }
3240 
3241 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3242   workers()->threads_do(tc);
3243   tc->do_thread(_cmThread);
3244   _cg1r->threads_do(tc);
3245   if (G1StringDedup::is_enabled()) {
3246     G1StringDedup::threads_do(tc);
3247   }
3248 }
3249 
3250 void G1CollectedHeap::print_tracing_info() const {
3251   // We'll overload this to mean "trace GC pause statistics."
3252   if (TraceYoungGenTime || TraceOldGenTime) {
3253     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3254     // to that.
3255     g1_policy()->print_tracing_info();
3256   }

3257   g1_rem_set()->print_summary_info();


3258   concurrent_mark()->print_summary_info();

3259   g1_policy()->print_yg_surv_rate_info();
3260 }
3261 
3262 #ifndef PRODUCT
3263 // Helpful for debugging RSet issues.
3264 
3265 class PrintRSetsClosure : public HeapRegionClosure {
3266 private:
3267   const char* _msg;
3268   size_t _occupied_sum;
3269 
3270 public:
3271   bool doHeapRegion(HeapRegion* r) {
3272     HeapRegionRemSet* hrrs = r->rem_set();
3273     size_t occupied = hrrs->occupied();
3274     _occupied_sum += occupied;
3275 
3276     tty->print_cr("Printing RSet for region " HR_FORMAT,
3277                            HR_FORMAT_PARAMS(r));
3278     if (occupied == 0) {
3279       tty->print_cr("  RSet is empty");
3280     } else {
3281       hrrs->print();
3282     }
3283     tty->print_cr("----------");
3284     return false;
3285   }
3286 
3287   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3288     tty->cr();
3289     tty->print_cr("========================================");
3290     tty->print_cr("%s", msg);
3291     tty->cr();
3292   }
3293 
3294   ~PrintRSetsClosure() {
3295     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3296     tty->print_cr("========================================");
3297     tty->cr();
3298   }
3299 };
3300 
3301 void G1CollectedHeap::print_cset_rsets() {
3302   PrintRSetsClosure cl("Printing CSet RSets");
3303   collection_set_iterate(&cl);
3304 }
3305 
3306 void G1CollectedHeap::print_all_rsets() {
3307   PrintRSetsClosure cl("Printing All RSets");
3308   heap_region_iterate(&cl);
3309 }
3310 #endif // PRODUCT
3311 
3312 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3313   YoungList* young_list = heap()->young_list();
3314 
3315   size_t eden_used_bytes = young_list->eden_used_bytes();
3316   size_t survivor_used_bytes = young_list->survivor_used_bytes();
3317 


3335 
3336   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3337   gc_tracer->report_metaspace_summary(when, metaspace_summary);
3338 }
3339 
3340 
3341 G1CollectedHeap* G1CollectedHeap::heap() {
3342   CollectedHeap* heap = Universe::heap();
3343   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3344   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3345   return (G1CollectedHeap*)heap;
3346 }
3347 
3348 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3349   // always_do_update_barrier = false;
3350   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3351   // Fill TLAB's and such
3352   accumulate_statistics_all_tlabs();
3353   ensure_parsability(true);
3354 
3355   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());



3356 }
3357 
3358 void G1CollectedHeap::gc_epilogue(bool full) {



3359   // we are at the end of the GC. Total collections has already been increased.
3360   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);


3361 
3362   // FIXME: what is this about?
3363   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3364   // is set.
3365 #if defined(COMPILER2) || INCLUDE_JVMCI
3366   assert(DerivedPointerTable::is_empty(), "derived pointer present");
3367 #endif
3368   // always_do_update_barrier = true;
3369 
3370   resize_all_tlabs();
3371   allocation_context_stats().update(full);
3372 
3373   // We have just completed a GC. Update the soft reference
3374   // policy with the new heap occupancy
3375   Universe::update_heap_info_at_gc();
3376 }
3377 
3378 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3379                                                uint gc_count_before,
3380                                                bool* succeeded,


3583     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3584 
3585     // Here's a good place to add any other checks we'd like to
3586     // perform on CSet regions.
3587     return false;
3588   }
3589 };
3590 #endif // ASSERT
3591 
3592 uint G1CollectedHeap::num_task_queues() const {
3593   return _task_queues->size();
3594 }
3595 
3596 #if TASKQUEUE_STATS
3597 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3598   st->print_raw_cr("GC Task Stats");
3599   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3600   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3601 }
3602 
3603 void G1CollectedHeap::print_taskqueue_stats() const {
3604   LogHandle(gc, task, stats) log;
3605   if (!log.is_develop()) {
3606     return;
3607   }
3608   ResourceMark rm;
3609   outputStream* st = log.develop_stream();
3610 
3611   print_taskqueue_stats_hdr(st);
3612 
3613   TaskQueueStats totals;
3614   const uint n = num_task_queues();
3615   for (uint i = 0; i < n; ++i) {
3616     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3617     totals += task_queue(i)->stats;
3618   }
3619   st->print_raw("tot "); totals.print(st); st->cr();
3620 
3621   DEBUG_ONLY(totals.verify());
3622 }
3623 
3624 void G1CollectedHeap::reset_taskqueue_stats() {
3625   const uint n = num_task_queues();
3626   for (uint i = 0; i < n; ++i) {
3627     task_queue(i)->stats.reset();
3628   }
3629 }
3630 #endif // TASKQUEUE_STATS
3631 
3632 void G1CollectedHeap::log_gc_footer(double pause_time_counter) {



















3633   if (evacuation_failed()) {
3634     log_info(gc)("To-space exhausted");
3635   }
3636 
3637   double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
3638   g1_policy()->print_phases(pause_time_sec);
3639 
3640   g1_policy()->print_detailed_heap_transition();








3641 }
3642 
3643 
3644 void G1CollectedHeap::wait_for_root_region_scanning() {
3645   double scan_wait_start = os::elapsedTime();
3646   // We have to wait until the CM threads finish scanning the
3647   // root regions as it's the only way to ensure that all the
3648   // objects on them have been correctly scanned before we start
3649   // moving them during the GC.
3650   bool waited = _cm->root_regions()->wait_until_scan_finished();
3651   double wait_time_ms = 0.0;
3652   if (waited) {
3653     double scan_wait_end = os::elapsedTime();
3654     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3655   }
3656   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3657 }
3658 
3659 bool
3660 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3661   assert_at_safepoint(true /* should_be_vm_thread */);
3662   guarantee(!is_gc_active(), "collection is not reentrant");
3663 
3664   if (GC_locker::check_active_before_gc()) {
3665     return false;
3666   }
3667 
3668   _gc_timer_stw->register_gc_start();
3669 
3670   GCIdMark gc_id_mark;
3671   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3672 
3673   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3674   ResourceMark rm;
3675 
3676   wait_for_root_region_scanning();
3677 

3678   print_heap_before_gc();
3679   trace_heap_before_gc(_gc_tracer_stw);
3680 
3681   verify_region_sets_optional();
3682   verify_dirty_young_regions();
3683 
3684   // This call will decide whether this pause is an initial-mark
3685   // pause. If it is, during_initial_mark_pause() will return true
3686   // for the duration of this pause.
3687   g1_policy()->decide_on_conc_mark_initiation();
3688 
3689   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3690   assert(!collector_state()->during_initial_mark_pause() ||
3691           collector_state()->gcs_are_young(), "sanity");
3692 
3693   // We also do not allow mixed GCs during marking.
3694   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3695 
3696   // Record whether this pause is an initial mark. When the current
3697   // thread has completed its logging output and it's safe to signal
3698   // the CM thread, the flag's value in the policy has been reset.
3699   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3700 
3701   // Inner scope for scope based logging, timers, and stats collection
3702   {
3703     EvacuationInfo evacuation_info;
3704 
3705     if (collector_state()->during_initial_mark_pause()) {
3706       // We are about to start a marking cycle, so we increment the
3707       // full collection counter.
3708       increment_old_marking_cycles_started();
3709       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3710     }
3711 
3712     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3713 
3714     GCTraceCPUTime tcpu;
3715 
3716     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3717                                                                   workers()->active_workers(),
3718                                                                   Threads::number_of_non_daemon_threads());
3719     workers()->set_active_workers(active_workers);
3720     FormatBuffer<> gc_string("GC %s%s",
3721         collector_state()->gcs_are_young() ? "young" : "mixed",
3722         collector_state()->during_initial_mark_pause() ? ", initial-mark" : "");
3723 
3724     GCTraceTime(Info, gc) tm5(gc_string, NULL, gc_cause(), true);
3725 
3726 
3727     double pause_start_sec = os::elapsedTime();
3728     double pause_start_counter = os::elapsed_counter();
3729     g1_policy()->note_gc_start(active_workers);

3730 
3731     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3732     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3733 
3734     // If the secondary_free_list is not empty, append it to the
3735     // free_list. No need to wait for the cleanup operation to finish;
3736     // the region allocation code will check the secondary_free_list
3737     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3738     // set, skip this step so that the region allocation code has to
3739     // get entries from the secondary_free_list.
3740     if (!G1StressConcRegionFreeing) {
3741       append_secondary_free_list_if_not_empty_with_lock();
3742     }
3743 
3744     assert(check_young_list_well_formed(), "young list should be well formed");
3745 
3746     // Don't dynamically change the number of GC threads this early.  A value of
3747     // 0 is used to indicate serial work.  When parallel work is done,
3748     // it will be set.
3749 


3763 #endif
3764 
3765       // Please see comment in g1CollectedHeap.hpp and
3766       // G1CollectedHeap::ref_processing_init() to see how
3767       // reference processing currently works in G1.
3768 
3769       // Enable discovery in the STW reference processor
3770       ref_processor_stw()->enable_discovery();
3771 
3772       {
3773         // We want to temporarily turn off discovery by the
3774         // CM ref processor, if necessary, and turn it back on
3775         // again later if we do. Using a scoped
3776         // NoRefDiscovery object will do this.
3777         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3778 
3779         // Forget the current alloc region (we might even choose it to be part
3780         // of the collection set!).
3781         _allocator->release_mutator_alloc_region();
3782 





3783         // This timing is only used by the ergonomics to handle our pause target.
3784         // It is unclear why this should not include the full pause. We will
3785         // investigate this in CR 7178365.
3786         //
3787         // Preserving the old comment here if that helps the investigation:
3788         //
3789         // The elapsed time induced by the start time below deliberately elides
3790         // the possible verification above.
3791         double sample_start_time_sec = os::elapsedTime();
3792 
3793         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3794 
3795         if (collector_state()->during_initial_mark_pause()) {
3796           concurrent_mark()->checkpointRootsInitialPre();
3797         }
3798 
3799         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3800         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3801 
3802         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());


3879 
3880         if (collector_state()->during_initial_mark_pause()) {
3881           // We have to do this before we notify the CM threads that
3882           // they can start working to make sure that all the
3883           // appropriate initialization is done on the CM object.
3884           concurrent_mark()->checkpointRootsInitialPost();
3885           collector_state()->set_mark_in_progress(true);
3886           // Note that we don't actually trigger the CM thread at
3887           // this point. We do that later when we're sure that
3888           // the current thread has completed its logging output.
3889         }
3890 
3891         allocate_dummy_regions();
3892 
3893         _allocator->init_mutator_alloc_region();
3894 
3895         {
3896           size_t expand_bytes = g1_policy()->expansion_amount();
3897           if (expand_bytes > 0) {
3898             size_t bytes_before = capacity();
3899             // No need for an ergo logging here,
3900             // expansion_amount() does this when it returns a value > 0.
3901             double expand_ms;
3902             if (!expand(expand_bytes, &expand_ms)) {
3903               // We failed to expand the heap. Cannot do anything about it.
3904             }
3905             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3906           }
3907         }
3908 
3909         // We redo the verification but now wrt to the new CSet which
3910         // has just got initialized after the previous CSet was freed.
3911         _cm->verify_no_cset_oops();
3912         _cm->note_end_of_gc();
3913 
3914         // This timing is only used by the ergonomics to handle our pause target.
3915         // It is unclear why this should not include the full pause. We will
3916         // investigate this in CR 7178365.
3917         double sample_end_time_sec = os::elapsedTime();
3918         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3919         size_t total_cards_scanned = per_thread_states.total_cards_scanned();


3939         // stamp here we invalidate all the GC time stamps on all the
3940         // regions and saved_mark_word() will simply return top() for
3941         // all the regions. This is a nicer way of ensuring this rather
3942         // than iterating over the regions and fixing them. In fact, the
3943         // GC time stamp increment here also ensures that
3944         // saved_mark_word() will return top() between pauses, i.e.,
3945         // during concurrent refinement. So we don't need the
3946         // is_gc_active() check to decide which top to use when
3947         // scanning cards (see CR 7039627).
3948         increment_gc_time_stamp();
3949 
3950         verify_after_gc();
3951         check_bitmaps("GC End");
3952 
3953         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3954         ref_processor_stw()->verify_no_references_recorded();
3955 
3956         // CM reference discovery will be re-enabled if necessary.
3957       }
3958 






3959 #ifdef TRACESPINNING
3960       ParallelTaskTerminator::print_termination_counts();
3961 #endif
3962 
3963       gc_epilogue(false);
3964     }
3965 
3966     // Print the remainder of the GC log output.
3967     log_gc_footer(os::elapsed_counter() - pause_start_counter);
3968 
3969     // It is not yet safe to tell the concurrent mark to
3970     // start as we have some optional output below. We don't want the
3971     // output from the concurrent mark thread interfering with this
3972     // logging output either.
3973 
3974     _hrm.verify_optional();
3975     verify_region_sets_optional();
3976 
3977     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3978     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3979 
3980     print_heap_after_gc();
3981     trace_heap_after_gc(_gc_tracer_stw);
3982 
3983     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3984     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3985     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3986     // before any GC notifications are raised.
3987     g1mm()->update_sizes();
3988 
3989     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3990     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3991     _gc_timer_stw->register_gc_end();
3992     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3993   }
3994   // It should now be safe to tell the concurrent mark thread to start
3995   // without its logging output interfering with the logging output
3996   // that came from the pause.
3997 


4148 
4149       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4150 
4151       double term_sec = 0.0;
4152       size_t evac_term_attempts = 0;
4153       {
4154         double start = os::elapsedTime();
4155         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4156         evac.do_void();
4157 
4158         evac_term_attempts = evac.term_attempts();
4159         term_sec = evac.term_time();
4160         double elapsed_sec = os::elapsedTime() - start;
4161         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4162         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4163         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4164       }
4165 
4166       assert(pss->queue_is_empty(), "should be empty");
4167 
4168       if (log_is_enabled(Debug, gc, task, stats)) {
4169         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4170         size_t lab_waste;
4171         size_t lab_undo_waste;
4172         pss->waste(lab_waste, lab_undo_waste);
4173         _g1h->print_termination_stats(worker_id,

4174                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
4175                                       strong_roots_sec * 1000.0,                  /* strong roots time */
4176                                       term_sec * 1000.0,                          /* evac term time */
4177                                       evac_term_attempts,                         /* evac term attempts */
4178                                       lab_waste,                                  /* alloc buffer waste */
4179                                       lab_undo_waste                              /* undo waste */
4180                                       );
4181       }
4182 
4183       // Close the inner scope so that the ResourceMark and HandleMark
4184       // destructors are executed here and are included as part of the
4185       // "GC Worker Time".
4186     }
4187     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4188   }
4189 };
4190 
4191 void G1CollectedHeap::print_termination_stats_hdr() {
4192   LogHandle(gc, task, stats) log;
4193   if (!log.is_debug()) {
4194     return;
4195   }
4196   log.debug("GC Termination Stats");
4197   log.debug("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
4198   log.debug("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
4199   log.debug("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4200 }
4201 
4202 void G1CollectedHeap::print_termination_stats(uint worker_id,

4203                                               double elapsed_ms,
4204                                               double strong_roots_ms,
4205                                               double term_ms,
4206                                               size_t term_attempts,
4207                                               size_t alloc_buffer_waste,
4208                                               size_t undo_waste) const {
4209   log_debug(gc, task, stats)
4210               ("%3u %9.2f %9.2f %6.2f "
4211                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4212                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4213                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4214                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4215                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4216                alloc_buffer_waste * HeapWordSize / K,
4217                undo_waste * HeapWordSize / K);
4218 }
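// Editor's note: with the header emitted by print_termination_stats_hdr()
// above, each worker contributes one row shaped roughly as follows (the
// figures are invented purely to illustrate the column layout; "total" is
// alloc buffer waste plus undo waste):
//
// thr     ms        ms      %        ms      %    attempts  total   alloc    undo
//   0     12.34      8.10  65.64      0.52   4.21        3      15      12       3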
4219 
4220 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4221 private:
4222   BoolObjectClosure* _is_alive;
4223   int _initial_string_table_size;
4224   int _initial_symbol_table_size;
4225 
4226   bool  _process_strings;
4227   int _strings_processed;
4228   int _strings_removed;
4229 
4230   bool  _process_symbols;


4239     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4240 
4241     _initial_string_table_size = StringTable::the_table()->table_size();
4242     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4243     if (process_strings) {
4244       StringTable::clear_parallel_claimed_index();
4245     }
4246     if (process_symbols) {
4247       SymbolTable::clear_parallel_claimed_index();
4248     }
4249   }
4250 
4251   ~G1StringSymbolTableUnlinkTask() {
4252     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4253               "claim value %d after unlink less than initial string table size %d",
4254               StringTable::parallel_claimed_index(), _initial_string_table_size);
4255     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4256               "claim value %d after unlink less than initial symbol table size %d",
4257               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4258 
4259     log_trace(gc, stringdedup)("Cleaned string and symbol table, "

4260                                "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4261                                "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4262                                strings_processed(), strings_removed(),
4263                                symbols_processed(), symbols_removed());
4264   }

4265 
4266   void work(uint worker_id) {
4267     int strings_processed = 0;
4268     int strings_removed = 0;
4269     int symbols_processed = 0;
4270     int symbols_removed = 0;
4271     if (_process_strings) {
4272       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4273       Atomic::add(strings_processed, &_strings_processed);
4274       Atomic::add(strings_removed, &_strings_removed);
4275     }
4276     if (_process_symbols) {
4277       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4278       Atomic::add(symbols_processed, &_symbols_processed);
4279       Atomic::add(symbols_removed, &_symbols_removed);
4280     }
4281   }
4282 
4283   size_t strings_processed() const { return (size_t)_strings_processed; }
4284   size_t strings_removed()   const { return (size_t)_strings_removed; }


5071 
5072 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5073   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5074 
5075   // Should G1EvacuationFailureALot be in effect for this GC?
5076   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5077 
5078   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5079   double start_par_time_sec = os::elapsedTime();
5080   double end_par_time_sec;
5081 
5082   {
5083     const uint n_workers = workers()->active_workers();
5084     G1RootProcessor root_processor(this, n_workers);
5085     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5086     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5087     if (collector_state()->during_initial_mark_pause()) {
5088       ClassLoaderDataGraph::clear_claimed_marks();
5089     }
5090 
5091     print_termination_stats_hdr();



5092 
5093     workers()->run_task(&g1_par_task);
5094     end_par_time_sec = os::elapsedTime();
5095 
5096     // Closing the inner scope will execute the destructor
5097     // for the G1RootProcessor object. We record the current
5098     // elapsed time before closing the scope so that time
5099     // taken for the destructor is NOT included in the
5100     // reported parallel time.
5101   }
5102 
5103   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5104 
5105   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5106   phase_times->record_par_time(par_time_ms);
5107 
5108   double code_root_fixup_time_ms =
5109         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5110   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5111 


5310   }
5311 }
5312 
5313 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5314   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5315   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5316     verify_dirty_region(hr);
5317   }
5318 }
5319 
5320 void G1CollectedHeap::verify_dirty_young_regions() {
5321   verify_dirty_young_list(_young_list->first_region());
5322 }
5323 
5324 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5325                                                HeapWord* tams, HeapWord* end) {
5326   guarantee(tams <= end,
5327             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5328   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5329   if (result < end) {
5330     log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
5331     log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));



5332     return false;
5333   }
5334   return true;
5335 }
5336 
5337 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5338   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5339   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5340 
5341   HeapWord* bottom = hr->bottom();
5342   HeapWord* ptams  = hr->prev_top_at_mark_start();
5343   HeapWord* ntams  = hr->next_top_at_mark_start();
5344   HeapWord* end    = hr->end();
5345 
5346   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5347 
5348   bool res_n = true;
5349   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5350   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5351   // if we happen to be in that state.
5352   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5353     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5354   }
5355   if (!res_p || !res_n) {
5356     log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
5357     log_info(gc, verify)("#### Caller: %s", caller);

5358     return false;
5359   }
5360   return true;
5361 }
5362 
5363 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5364   if (!G1VerifyBitmaps) return;
5365 
5366   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5367 }
5368 
5369 class G1VerifyBitmapClosure : public HeapRegionClosure {
5370 private:
5371   const char* _caller;
5372   G1CollectedHeap* _g1h;
5373   bool _failures;
5374 
5375 public:
5376   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5377     _caller(caller), _g1h(g1h), _failures(false) { }


5391 
5392 void G1CollectedHeap::check_bitmaps(const char* caller) {
5393   if (!G1VerifyBitmaps) return;
5394 
5395   G1VerifyBitmapClosure cl(caller, this);
5396   heap_region_iterate(&cl);
5397   guarantee(!cl.failures(), "bitmap verification");
5398 }
5399 
5400 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5401  private:
5402   bool _failures;
5403  public:
5404   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5405 
5406   virtual bool doHeapRegion(HeapRegion* hr) {
5407     uint i = hr->hrm_index();
5408     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5409     if (hr->is_humongous()) {
5410       if (hr->in_collection_set()) {
5411         log_info(gc, verify)("\n## humongous region %u in CSet", i);
5412         _failures = true;
5413         return true;
5414       }
5415       if (cset_state.is_in_cset()) {
5416         log_info(gc, verify)("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5417         _failures = true;
5418         return true;
5419       }
5420       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5421         log_info(gc, verify)("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5422         _failures = true;
5423         return true;
5424       }
5425     } else {
5426       if (cset_state.is_humongous()) {
5427         log_info(gc, verify)("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5428         _failures = true;
5429         return true;
5430       }
5431       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5432         log_info(gc, verify)("\n## in CSet %d / cset state %d inconsistency for region %u",
5433                              hr->in_collection_set(), cset_state.value(), i);
5434         _failures = true;
5435         return true;
5436       }
5437       if (cset_state.is_in_cset()) {
5438         if (hr->is_young() != (cset_state.is_young())) {
5439           log_info(gc, verify)("\n## is_young %d / cset state %d inconsistency for region %u",
5440                                hr->is_young(), cset_state.value(), i);
5441           _failures = true;
5442           return true;
5443         }
5444         if (hr->is_old() != (cset_state.is_old())) {
5445           log_info(gc, verify)("\n## is_old %d / cset state %d inconsistency for region %u",
5446                                hr->is_old(), cset_state.value(), i);
5447           _failures = true;
5448           return true;
5449         }
5450       }
5451     }
5452     return false;
5453   }
5454 
5455   bool failures() const { return _failures; }
5456 };
5457 
5458 bool G1CollectedHeap::check_cset_fast_test() {
5459   G1CheckCSetFastTableClosure cl;
5460   _hrm.iterate(&cl);
5461   return !cl.failures();
5462 }
5463 #endif // PRODUCT
5464 
5465 void G1CollectedHeap::cleanUpCardTable() {


5636     // until the end of a concurrent mark.
5637     //
5638     // It is not required to check whether the object has been found dead by marking
5639     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5640     // all objects allocated during that time are considered live.
5641     // SATB marking is even more conservative than the remembered set.
5642     // So if at this point in the collection there is no remembered set entry,
5643     // nobody has a reference to the humongous object.
5644     // At the start of the collection we flush all refinement logs, and remembered sets
5645     // are completely up-to-date with respect to references to the humongous object.
5646     //
5647     // Other implementation considerations:
5648     // - never consider object arrays at this time because they would require
5649     // considerable effort to clean up the remembered sets. This restriction is
5650     // needed because stale remembered sets might reference locations that
5651     // are currently being allocated into.
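         // Condensed, positive form of the eligibility test below (an illustrative
         // sketch only, reusing the accessors already visible in this closure):
         //
         //   bool can_reclaim = g1h->is_humongous_reclaim_candidate(r->hrm_index()) &&
         //                      r->rem_set()->is_empty();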
5652     uint region_idx = r->hrm_index();
5653     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5654         !r->rem_set()->is_empty()) {
5655 
5656       log_trace(gc, humongous)("Live humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",

5657                                region_idx,
5658                                (size_t)obj->size() * HeapWordSize,
5659                                p2i(r->bottom()),
5660                                r->region_num(),
5661                                r->rem_set()->occupied(),
5662                                r->rem_set()->strong_code_roots_list_length(),
5663                                next_bitmap->isMarked(r->bottom()),
5664                                g1h->is_humongous_reclaim_candidate(region_idx),
5665                                obj->is_typeArray()
5666                               );

5667 
5668       return false;
5669     }
5670 
5671     guarantee(obj->is_typeArray(),
5672               "Only eagerly reclaiming type arrays is supported, but the object "
5673               PTR_FORMAT " is not.", p2i(r->bottom()));
5674 
5675     log_trace(gc, humongous)("Dead humongous region %u size " SIZE_FORMAT " start " PTR_FORMAT " length %u with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",

5676                              region_idx,
5677                              (size_t)obj->size() * HeapWordSize,
5678                              p2i(r->bottom()),
5679                              r->region_num(),
5680                              r->rem_set()->occupied(),
5681                              r->rem_set()->strong_code_roots_list_length(),
5682                              next_bitmap->isMarked(r->bottom()),
5683                              g1h->is_humongous_reclaim_candidate(region_idx),
5684                              obj->is_typeArray()
5685                             );
5686 
5687     // Need to clear the mark bit of the humongous object if it is already set.
5688     if (next_bitmap->isMarked(r->bottom())) {
5689       next_bitmap->clear(r->bottom());
5690     }
5691     _freed_bytes += r->used();
5692     r->set_containing_set(NULL);
5693     _humongous_regions_removed.increment(1u, r->capacity());
5694     g1h->free_humongous_region(r, _free_region_list, false);
5695 
5696     return false;
5697   }
5698 
5699   HeapRegionSetCount& humongous_free_count() {
5700     return _humongous_regions_removed;
5701   }
5702 
5703   size_t bytes_freed() const {
5704     return _freed_bytes;
5705   }
5706 
5707   size_t humongous_reclaimed() const {
5708     return _humongous_regions_removed.length();
5709   }
5710 };
5711 
5712 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5713   assert_at_safepoint(true);
5714 
5715   if (!G1EagerReclaimHumongousObjects ||
5716       (!_has_humongous_reclaim_candidates && !log_is_enabled(Trace, gc, humongous))) {
5717     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5718     return;
5719   }
5720 
5721   double start_time = os::elapsedTime();
5722 
5723   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5724 
5725   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5726   heap_region_iterate(&cl);
5727 
5728   HeapRegionSetCount empty_set;
5729   remove_from_old_sets(empty_set, cl.humongous_free_count());
5730 
5731   G1HRPrinter* hrp = hr_printer();
5732   if (hrp->is_active()) {
5733     FreeRegionListIterator iter(&local_cleanup_list);
5734     while (iter.more_available()) {
5735       HeapRegion* hr = iter.get_next();
5736       hrp->cleanup(hr);


5749 // the current incremental collection set in preparation for a
5750 // full collection. After the full GC we will start to build up
5751 // the incremental collection set again.
5752 // This is only called when we're doing a full collection
5753 // and is immediately followed by the tearing down of the young list.
5754 
5755 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5756   HeapRegion* cur = cs_head;
5757 
5758   while (cur != NULL) {
5759     HeapRegion* next = cur->next_in_collection_set();
5760     assert(cur->in_collection_set(), "bad CS");
5761     cur->set_next_in_collection_set(NULL);
5762     clear_in_cset(cur);
5763     cur->set_young_index_in_cset(-1);
5764     cur = next;
5765   }
5766 }
5767 
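     // The next three methods form a simple flag-plus-monitor handshake on
     // SecondaryFreeList_lock between the concurrent-mark thread (which frees regions)
     // and the rest of the VM. A minimal sketch of the pattern (illustrative pseudocode
     // only, not the HotSpot API):
     //
     //   announce:  flag = true;                                  // set_free_regions_coming()
     //   finish:    { lock; flag = false; notify_all(); }         // reset_free_regions_coming()
     //   consume:   if (flag) { lock; while (flag) wait(); }      // wait_while_free_regions_coming()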
5768 void G1CollectedHeap::set_free_regions_coming() {
5769   log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "

5770                             "setting free regions coming");

5771 
5772   assert(!free_regions_coming(), "pre-condition");
5773   _free_regions_coming = true;
5774 }
5775 
5776 void G1CollectedHeap::reset_free_regions_coming() {
5777   assert(free_regions_coming(), "pre-condition");
5778 
5779   {
5780     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5781     _free_regions_coming = false;
5782     SecondaryFreeList_lock->notify_all();
5783   }
5784 
5785   log_develop(gc, freelist)("G1ConcRegionFreeing [cm thread] : "

5786                             "reset free regions coming");

5787 }
5788 
5789 void G1CollectedHeap::wait_while_free_regions_coming() {
5790   // Most of the time we won't have to wait, so let's do a quick test
5791   // first before we take the lock.
5792   if (!free_regions_coming()) {
5793     return;
5794   }
5795 
5796   log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "

5797                             "waiting for free regions");

5798 
5799   {
5800     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5801     while (free_regions_coming()) {
5802       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5803     }
5804   }
5805 
5806   log_develop(gc, freelist)("G1ConcRegionFreeing [other] : "

5807                             "done waiting for free regions");

5808 }
5809 
5810 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5811   return _allocator->is_retained_old_region(hr);
5812 }
5813 
5814 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5815   _young_list->push_region(hr);
5816 }
5817 
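     // Closure used by check_young_list_empty() below: reports any region in the heap
     // that is still tagged as young.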
5818 class NoYoungRegionsClosure: public HeapRegionClosure {
5819 private:
5820   bool _success;
5821 public:
5822   NoYoungRegionsClosure() : _success(true) { }
5823   bool doHeapRegion(HeapRegion* r) {
5824     if (r->is_young()) {
5825       log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5826                            p2i(r->bottom()), p2i(r->end()));
5827       _success = false;
5828     }
5829     return false;
5830   }
5831   bool success() { return _success; }
5832 };
5833 
5834 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5835   bool ret = _young_list->check_list_empty(check_sample);
5836 
5837   if (check_heap) {
5838     NoYoungRegionsClosure closure;
5839     heap_region_iterate(&closure);
5840     ret = ret && closure.success();
5841   }
5842 
5843   return ret;
5844 }
5845 


6060 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6061                                              size_t allocated_bytes,
6062                                              InCSetState dest) {
6063   bool during_im = collector_state()->during_initial_mark_pause();
6064   alloc_region->note_end_of_copying(during_im);
6065   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6066   if (dest.is_young()) {
6067     young_list()->add_survivor_region(alloc_region);
6068   } else {
6069     _old_set.add(alloc_region);
6070   }
6071   _hr_printer.retire(alloc_region);
6072 }
6073 
6074 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6075   bool expanded = false;
6076   uint index = _hrm.find_highest_free(&expanded);
6077 
6078   if (index != G1_NO_HRM_INDEX) {
6079     if (expanded) {
6080       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",



6081                                 HeapRegion::GrainWords * HeapWordSize);
6082     }
6083     _hrm.allocate_free_regions_starting_at(index, 1);
6084     return region_at(index);
6085   }
6086   return NULL;
6087 }
6088 
6089 // Heap region set verification
6090 
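     // Walks all regions and tallies those belonging to the old set, the humongous set
     // and the free list, so the totals can be compared against the corresponding
     // region sets and the heap region manager.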
6091 class VerifyRegionListsClosure : public HeapRegionClosure {
6092 private:
6093   HeapRegionSet*   _old_set;
6094   HeapRegionSet*   _humongous_set;
6095   HeapRegionManager*   _hrm;
6096 
6097 public:
6098   HeapRegionSetCount _old_count;
6099   HeapRegionSetCount _humongous_count;
6100   HeapRegionSetCount _free_count;

