src/share/vm/gc/g1/g1CollectedHeap.cpp

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentG1Refine.hpp"
  33 #include "gc/g1/concurrentG1RefineThread.hpp"
  34 #include "gc/g1/concurrentMarkThread.inline.hpp"
  35 #include "gc/g1/g1Allocator.inline.hpp"
  36 #include "gc/g1/g1CollectedHeap.inline.hpp"
  37 #include "gc/g1/g1CollectorPolicy.hpp"
  38 #include "gc/g1/g1CollectorState.hpp"
  39 #include "gc/g1/g1ErgoVerbose.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1GCPhaseTimes.hpp"
  42 #include "gc/g1/g1Log.hpp"
  43 #include "gc/g1/g1MarkSweep.hpp"
  44 #include "gc/g1/g1OopClosures.inline.hpp"
  45 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  46 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  47 #include "gc/g1/g1RemSet.inline.hpp"
  48 #include "gc/g1/g1RootClosures.hpp"
  49 #include "gc/g1/g1RootProcessor.hpp"
  50 #include "gc/g1/g1StringDedup.hpp"
  51 #include "gc/g1/g1YCTypes.hpp"
  52 #include "gc/g1/heapRegion.inline.hpp"
  53 #include "gc/g1/heapRegionRemSet.hpp"
  54 #include "gc/g1/heapRegionSet.inline.hpp"
  55 #include "gc/g1/suspendibleThreadSet.hpp"
  56 #include "gc/g1/vm_operations_g1.hpp"
  57 #include "gc/shared/gcHeapSummary.hpp"
  58 #include "gc/shared/gcId.hpp"
  59 #include "gc/shared/gcLocker.inline.hpp"
  60 #include "gc/shared/gcTimer.hpp"
  61 #include "gc/shared/gcTrace.hpp"
  62 #include "gc/shared/gcTraceTime.hpp"
  63 #include "gc/shared/generationSpec.hpp"
  64 #include "gc/shared/isGCActiveMark.hpp"
  65 #include "gc/shared/referenceProcessor.hpp"
  66 #include "gc/shared/taskqueue.inline.hpp"

  67 #include "memory/allocation.hpp"
  68 #include "memory/iterator.hpp"
  69 #include "oops/oop.inline.hpp"
  70 #include "runtime/atomic.inline.hpp"
  71 #include "runtime/init.hpp"
  72 #include "runtime/orderAccess.inline.hpp"
  73 #include "runtime/vmThread.hpp"
  74 #include "utilities/globalDefinitions.hpp"
  75 #include "utilities/stack.inline.hpp"
  76 
  77 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  78 
  79 // INVARIANTS/NOTES
  80 //
  81 // All allocation activity covered by the G1CollectedHeap interface is
  82 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  83 // and allocate_new_tlab, which are the "entry" points to the
  84 // allocation code from the rest of the JVM.  (Note that this does not
  85 // apply to TLAB allocation, which is not part of this interface: it
  86 // is done by clients of this interface.)
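
[Illustrative aside, not part of this file] A minimal standalone C++ sketch of the invariant described above: the two allocation entry points serialize on a single lock, while allocation inside a TLAB stays outside it. ToyHeap and its members are hypothetical stand-ins, not HotSpot APIs.

#include <cstddef>
#include <mutex>
#include <new>

// Hypothetical stand-in for the heap: both entry points take the same lock,
// mirroring how mem_allocate / allocate_new_tlab acquire the HeapLock.
class ToyHeap {
  std::mutex  _heap_lock;      // plays the role of the HeapLock
  std::size_t _used_words = 0;

public:
  void* mem_allocate(std::size_t word_size) {
    std::lock_guard<std::mutex> x(_heap_lock);   // entry point #1 is serialized
    _used_words += word_size;
    return ::operator new(word_size * sizeof(void*));
  }

  void* allocate_new_tlab(std::size_t word_size) {
    std::lock_guard<std::mutex> x(_heap_lock);   // entry point #2 is serialized
    _used_words += word_size;
    // Bump-pointer allocation *inside* the returned TLAB is done by the
    // caller without taking the lock, as the note above says.
    return ::operator new(word_size * sizeof(void*));
  }
};
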


 207   } while (hr != head);
 208   assert(hr != NULL, "invariant");
 209   hr->set_next_dirty_cards_region(NULL);
 210   return hr;
 211 }
 212 
 213 // Returns true if the reference points to an object that
 214 // can move in an incremental collection.
 215 bool G1CollectedHeap::is_scavengable(const void* p) {
 216   HeapRegion* hr = heap_region_containing(p);
 217   return !hr->is_pinned();
 218 }
 219 
 220 // Private methods.
 221 
 222 HeapRegion*
 223 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 224   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 225   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 226     if (!_secondary_free_list.is_empty()) {
 227       if (G1ConcRegionFreeingVerbose) {
 228         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 229                                "secondary_free_list has %u entries",
 230                                _secondary_free_list.length());
 231       }
 232       // It looks as if there are free regions available on the
 233       // secondary_free_list. Let's move them to the free_list and try
 234       // again to allocate from it.
 235       append_secondary_free_list();
 236 
 237       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 238              "empty we should have moved at least one entry to the free_list");
 239       HeapRegion* res = _hrm.allocate_free_region(is_old);
 240       if (G1ConcRegionFreeingVerbose) {
 241         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 242                                "allocated " HR_FORMAT " from secondary_free_list",
 243                                HR_FORMAT_PARAMS(res));
 244       }
 245       return res;
 246     }
 247 
 248     // Wait here until we get notified either when (a) there are no
 249     // more free regions coming or (b) some regions have been moved onto
 250     // the secondary_free_list.
 251     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 252   }
 253 
 254   if (G1ConcRegionFreeingVerbose) {
 255     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 256                            "could not allocate from secondary_free_list");
 257   }
 258   return NULL;
 259 }
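
[Illustrative aside, not part of this file] The loop above blocks until either regions show up on the secondary free list or the concurrent cleaner announces that no more are coming. A hedged, standalone C++ analogue of that condition-variable pattern; Region, take_or_wait and the other names are hypothetical, not HotSpot types.

#include <condition_variable>
#include <deque>
#include <mutex>

struct Region {};

class SecondaryFreeList {
  std::mutex              _lock;
  std::condition_variable _cv;
  std::deque<Region*>     _list;
  bool                    _free_regions_coming = true;

public:
  // Returns a region, or nullptr once the cleaner is done and the list is
  // empty -- the same shape as new_region_try_secondary_free_list().
  Region* take_or_wait() {
    std::unique_lock<std::mutex> x(_lock);
    while (!_list.empty() || _free_regions_coming) {
      if (!_list.empty()) {
        Region* r = _list.front();
        _list.pop_front();
        return r;
      }
      _cv.wait(x);   // woken when regions are appended or the cleaner finishes
    }
    return nullptr;
  }

  void append(Region* r) {
    { std::lock_guard<std::mutex> x(_lock); _list.push_back(r); }
    _cv.notify_all();
  }

  void set_done() {
    { std::lock_guard<std::mutex> x(_lock); _free_regions_coming = false; }
    _cv.notify_all();
  }
};
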
 260 
 261 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 262   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 263          "the only time we use this to allocate a humongous region is "
 264          "when we are allocating a single humongous region");
 265 
 266   HeapRegion* res;
 267   if (G1StressConcRegionFreeing) {
 268     if (!_secondary_free_list.is_empty()) {
 269       if (G1ConcRegionFreeingVerbose) {
 270         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 271                                "forced to look at the secondary_free_list");
 272       }
 273       res = new_region_try_secondary_free_list(is_old);
 274       if (res != NULL) {
 275         return res;
 276       }
 277     }
 278   }
 279 
 280   res = _hrm.allocate_free_region(is_old);
 281 
 282   if (res == NULL) {
 283     if (G1ConcRegionFreeingVerbose) {
 284       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 285                              "res == NULL, trying the secondary_free_list");
 286     }
 287     res = new_region_try_secondary_free_list(is_old);
 288   }
 289   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 290     // Currently, only attempts to allocate GC alloc regions set
 291     // do_expand to true. So, we should only reach here during a
 292     // safepoint. If this assumption changes we might have to
 293     // reconsider the use of _expand_heap_after_alloc_failure.
 294     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 295 
 296     ergo_verbose1(ErgoHeapSizing,
 297                   "attempt heap expansion",
 298                   ergo_format_reason("region allocation request failed")
 299                   ergo_format_byte("allocation request"),
 300                   word_size * HeapWordSize);

 301     if (expand(word_size * HeapWordSize)) {
 302       // Given that expand() succeeded in expanding the heap, and we
 303       // always expand the heap by an amount aligned to the heap
 304       // region size, the free list should in theory not be empty.
 305       // Either way, allocate_free_region() will check for NULL.
 306       res = _hrm.allocate_free_region(is_old);
 307     } else {
 308       _expand_heap_after_alloc_failure = false;
 309     }
 310   }
 311   return res;
 312 }
 313 
 314 HeapWord*
 315 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 316                                                            uint num_regions,
 317                                                            size_t word_size,
 318                                                            AllocationContext_t context) {
 319   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 320   assert(is_humongous(word_size), "word_size should be humongous");


 468     // potentially waits for regions from the secondary free list.
 469     wait_while_free_regions_coming();
 470     append_secondary_free_list_if_not_empty_with_lock();
 471 
 472     // Policy: Try only empty (i.e. already committed) regions first. Maybe we
 473     // are lucky enough to find some.
 474     first = _hrm.find_contiguous_only_empty(obj_regions);
 475     if (first != G1_NO_HRM_INDEX) {
 476       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 477     }
 478   }
 479 
 480   if (first == G1_NO_HRM_INDEX) {
 481     // Policy: We could not find enough regions for the humongous object in the
 482     // free list. Look through the heap to find a mix of free and uncommitted regions.
 483     // If we find such a mix, try expansion.
 484     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 485     if (first != G1_NO_HRM_INDEX) {
 486       // We found something. Make sure these regions are committed, i.e. expand
 487       // the heap. Alternatively we could do a defragmentation GC.
 488       ergo_verbose1(ErgoHeapSizing,
 489                     "attempt heap expansion",
 490                     ergo_format_reason("humongous allocation request failed")
 491                     ergo_format_byte("allocation request"),
 492                     word_size * HeapWordSize);
 493 

 494       _hrm.expand_at(first, obj_regions);
 495       g1_policy()->record_new_heap_size(num_regions());
 496 
 497 #ifdef ASSERT
 498       for (uint i = first; i < first + obj_regions; ++i) {
 499         HeapRegion* hr = region_at(i);
 500         assert(hr->is_free(), "sanity");
 501         assert(hr->is_empty(), "sanity");
 502         assert(is_on_master_free_list(hr), "sanity");
 503       }
 504 #endif
 505       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 506     } else {
 507       // Policy: Potentially trigger a defragmentation GC.
 508     }
 509   }
 510 
 511   HeapWord* result = NULL;
 512   if (first != G1_NO_HRM_INDEX) {
 513     result = humongous_obj_allocate_initialize_regions(first, obj_regions,


 791     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 792     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 793       start_address = start_region->end();
 794       if (start_address > last_address) {
 795         increase_used(word_size * HeapWordSize);
 796         start_region->set_top(last_address + 1);
 797         continue;
 798       }
 799       start_region->set_top(start_address);
 800       curr_range = MemRegion(start_address, last_address + 1);
 801       start_region = _hrm.addr_to_region(start_address);
 802     }
 803 
 804     // Perform the actual region allocation, exiting if it fails.
 805     // Then note how much new space we have allocated.
 806     if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
 807       return false;
 808     }
 809     increase_used(word_size * HeapWordSize);
 810     if (commits != 0) {
 811       ergo_verbose1(ErgoHeapSizing,
 812                     "attempt heap expansion",
 813                     ergo_format_reason("allocate archive regions")
 814                     ergo_format_byte("total size"),
 815                     HeapRegion::GrainWords * HeapWordSize * commits);

 816     }
 817 
 818     // Mark each G1 region touched by the range as archive, add it to the old set,
 819     // and set the allocation context and top.
 820     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 821     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 822     prev_last_region = last_region;
 823 
 824     while (curr_region != NULL) {
 825       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 826              "Region already in use (index %u)", curr_region->hrm_index());
 827       _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
 828       curr_region->set_allocation_context(AllocationContext::system());
 829       curr_region->set_archive();
 830       _old_set.add(curr_region);
 831       if (curr_region != last_region) {
 832         curr_region->set_top(curr_region->end());
 833         curr_region = _hrm.next_region_in_heap(curr_region);
 834       } else {
 835         curr_region->set_top(last_address + 1);


 976       guarantee(curr_region->is_archive(),
 977                 "Expected archive region at index %u", curr_region->hrm_index());
 978       uint curr_index = curr_region->hrm_index();
 979       _old_set.remove(curr_region);
 980       curr_region->set_free();
 981       curr_region->set_top(curr_region->bottom());
 982       if (curr_region != last_region) {
 983         curr_region = _hrm.next_region_in_heap(curr_region);
 984       } else {
 985         curr_region = NULL;
 986       }
 987       _hrm.shrink_at(curr_index, 1);
 988       uncommitted_regions++;
 989     }
 990 
 991     // Notify mark-sweep that this is no longer an archive range.
 992     G1MarkSweep::set_range_archive(ranges[i], false);
 993   }
 994 
 995   if (uncommitted_regions != 0) {
 996     ergo_verbose1(ErgoHeapSizing,
 997                   "attempt heap shrinking",
 998                   ergo_format_reason("uncommitted archive regions")
 999                   ergo_format_byte("total size"),
1000                   HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
1001   }
1002   decrease_used(size_used);
1003 }
1004 
1005 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1006                                                         uint* gc_count_before_ret,
1007                                                         uint* gclocker_retry_count_ret) {
1008   // The structure of this method has a lot of similarities to
1009   // attempt_allocation_slow(). The reason these two were not merged
1010   // into a single one is that such a method would require several "if
1011   // allocation is not humongous do this, otherwise do that"
1012   // conditional paths which would obscure its flow. In fact, an early
1013   // version of this code did use a unified method which was harder to
1014   // follow and, as a result, it had subtle bugs that were hard to
1015   // track down. So keeping these two methods separate allows each to
1016   // be more readable. It will be good to keep these two in sync as
1017   // much as possible.
1018 
1019   assert_heap_not_locked_and_not_at_safepoint();


1219       // We only generate output for non-empty regions.
1220     } else if (hr->is_starts_humongous()) {
1221       _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1222     } else if (hr->is_continues_humongous()) {
1223       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1224     } else if (hr->is_archive()) {
1225       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1226     } else if (hr->is_old()) {
1227       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1228     } else {
1229       ShouldNotReachHere();
1230     }
1231     return false;
1232   }
1233 
1234   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1235     : _hr_printer(hr_printer) { }
1236 };
1237 
1238 void G1CollectedHeap::print_hrm_post_compaction() {

1239   PostCompactionPrinterClosure cl(hr_printer());
1240   heap_region_iterate(&cl);


1241 }
1242 
1243 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1244                                          bool clear_all_soft_refs) {
1245   assert_at_safepoint(true /* should_be_vm_thread */);
1246 
1247   if (GC_locker::check_active_before_gc()) {
1248     return false;
1249   }
1250 
1251   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1252   gc_timer->register_gc_start();
1253 
1254   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1255   GCIdMark gc_id_mark;
1256   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1257 
1258   SvcGCMarker sgcm(SvcGCMarker::FULL);
1259   ResourceMark rm;
1260 
1261   G1Log::update_level();
1262   print_heap_before_gc();
1263   trace_heap_before_gc(gc_tracer);
1264 
1265   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1266 
1267   verify_region_sets_optional();
1268 
1269   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1270                            collector_policy()->should_clear_all_soft_refs();
1271 
1272   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1273 
1274   {
1275     IsGCActiveMark x;
1276 
1277     // Timing
1278     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1279     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1280 
1281     {
1282       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1283       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1284       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1285 
1286       g1_policy()->record_full_collection_start();
1287 
1288       // Note: When we have a more flexible GC logging framework that
1289       // allows us to add optional attributes to a GC log record we
1290       // could consider timing and reporting how long we wait in the
1291       // following two methods.
1292       wait_while_free_regions_coming();
1293       // If we start the compaction before the CM threads finish
1294       // scanning the root regions we might trip them over as we'll
1295       // be moving objects / updating references. So let's wait until
1296       // they are done. By telling them to abort, they should complete
1297       // early.
1298       _cm->root_regions()->abort();
1299       _cm->root_regions()->wait_until_scan_finished();
1300       append_secondary_free_list_if_not_empty_with_lock();
1301 
1302       gc_prologue(true);


1313 #if defined(COMPILER2) || INCLUDE_JVMCI
1314       DerivedPointerTable::clear();
1315 #endif
1316 
1317       // Disable discovery and empty the discovered lists
1318       // for the CM ref processor.
1319       ref_processor_cm()->disable_discovery();
1320       ref_processor_cm()->abandon_partial_discovery();
1321       ref_processor_cm()->verify_no_references_recorded();
1322 
1323       // Abandon current iterations of concurrent marking and concurrent
1324       // refinement, if any are in progress. We have to do this before
1325       // wait_until_scan_finished() below.
1326       concurrent_mark()->abort();
1327 
1328       // Make sure we'll choose a new allocation region afterwards.
1329       _allocator->release_mutator_alloc_region();
1330       _allocator->abandon_gc_alloc_regions();
1331       g1_rem_set()->cleanupHRRS();
1332 
1333       // We should call this after we retire any currently active alloc
1334       // regions so that all the ALLOC / RETIRE events are generated
1335       // before the start GC event.
1336       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1337 
1338       // We may have added regions to the current incremental collection
1339       // set between the last GC or pause and now. We need to clear the
1340       // incremental collection set and then start rebuilding it afresh
1341       // after this full GC.
1342       abandon_collection_set(g1_policy()->inc_cset_head());
1343       g1_policy()->clear_incremental_cset();
1344       g1_policy()->stop_incremental_cset_building();
1345 
1346       tear_down_region_sets(false /* free_list_only */);
1347       collector_state()->set_gcs_are_young(true);
1348 
1349       // See the comments in g1CollectedHeap.hpp and
1350       // G1CollectedHeap::ref_processing_init() about
1351       // how reference processing currently works in G1.
1352 
1353       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1354       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1355 
1356       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1357       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);


1384       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1385       ClassLoaderDataGraph::purge();
1386       MetaspaceAux::verify_metrics();
1387 
1388       // Note: since we've just done a full GC, concurrent
1389       // marking is no longer active. Therefore we need not
1390       // re-enable reference discovery for the CM ref processor.
1391       // That will be done at the start of the next marking cycle.
1392       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1393       ref_processor_cm()->verify_no_references_recorded();
1394 
1395       reset_gc_time_stamp();
1396       // Since everything potentially moved, we will clear all remembered
1397       // sets, and clear all cards.  Later we will rebuild remembered
1398       // sets. We will also reset the GC time stamps of the regions.
1399       clear_rsets_post_compaction();
1400       check_gc_time_stamps();
1401 
1402       resize_if_necessary_after_full_collection();
1403 
1404       if (_hr_printer.is_active()) {
1405         // We should do this after we potentially resize the heap so
1406         // that all the COMMIT / UNCOMMIT events are generated before
1407         // the end GC event.
1408 
1409         print_hrm_post_compaction();
1410         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1411       }
1412 
1413       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1414       if (hot_card_cache->use_cache()) {
1415         hot_card_cache->reset_card_counts();
1416         hot_card_cache->reset_hot_cache();
1417       }
1418 
1419       // Rebuild remembered sets of all regions.
1420       uint n_workers =
1421         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1422                                                 workers()->active_workers(),
1423                                                 Threads::number_of_non_daemon_threads());
1424       workers()->set_active_workers(n_workers);
1425 
1426       ParRebuildRSTask rebuild_rs_task(this);
1427       workers()->run_task(&rebuild_rs_task);
1428 
1429       // Rebuild the strong code root lists for each region
1430       rebuild_strong_code_roots();
1431 


1460       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1461       // objects marked during a full GC against the previous bitmap.
1462       // But we need to clear it before calling check_bitmaps below since
1463       // the full GC has compacted objects and updated TAMS but not updated
1464       // the prev bitmap.
1465       if (G1VerifyBitmaps) {
1466         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1467       }
1468       check_bitmaps("Full GC End");
1469 
1470       // Start a new incremental collection set for the next pause
1471       assert(g1_policy()->collection_set() == NULL, "must be");
1472       g1_policy()->start_incremental_cset_building();
1473 
1474       clear_cset_fast_test();
1475 
1476       _allocator->init_mutator_alloc_region();
1477 
1478       g1_policy()->record_full_collection_end();
1479 
1480       if (G1Log::fine()) {
1481         g1_policy()->print_heap_transition();
1482       }
1483 
1484       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1485       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1486       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1487       // before any GC notifications are raised.
1488       g1mm()->update_sizes();
1489 
1490       gc_epilogue(true);
1491     }
1492 
1493     if (G1Log::finer()) {
1494       g1_policy()->print_detailed_heap_transition(true /* full */);
1495     }
1496 
1497     print_heap_after_gc();
1498     trace_heap_after_gc(gc_tracer);
1499 
1500     post_full_gc_dump(gc_timer);
1501 
1502     gc_timer->register_gc_end();
1503     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1504   }
1505 
1506   return true;
1507 }
1508 
1509 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1510   // Currently, there is no facility in the do_full_collection(bool) API to notify
1511   // the caller that the collection did not succeed (e.g., because it was locked
1512   // out by the GC locker). So, right now, we'll ignore the return value.
1513   bool dummy = do_full_collection(true,                /* explicit_gc */
1514                                   clear_all_soft_refs);
1515 }


1553 
1554   // This assert only makes sense here, before we adjust them
1555   // with respect to the min and max heap size.
1556   assert(minimum_desired_capacity <= maximum_desired_capacity,
1557          "minimum_desired_capacity = " SIZE_FORMAT ", "
1558          "maximum_desired_capacity = " SIZE_FORMAT,
1559          minimum_desired_capacity, maximum_desired_capacity);
1560 
1561   // Should not be greater than the heap max size. No need to adjust
1562   // it with respect to the heap min size as it's a lower bound (i.e.,
1563   // we'll try to make the capacity larger than it, not smaller).
1564   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1565   // Should not be less than the heap min size. No need to adjust it
1566   // with respect to the heap max size as it's an upper bound (i.e.,
1567   // we'll try to make the capacity smaller than it, not greater).
1568   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1569 
1570   if (capacity_after_gc < minimum_desired_capacity) {
1571     // Don't expand unless it's significant
1572     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1573     ergo_verbose4(ErgoHeapSizing,
1574                   "attempt heap expansion",
1575                   ergo_format_reason("capacity lower than "
1576                                      "min desired capacity after Full GC")
1577                   ergo_format_byte("capacity")
1578                   ergo_format_byte("occupancy")
1579                   ergo_format_byte_perc("min desired capacity"),
1580                   capacity_after_gc, used_after_gc,
1581                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1582     expand(expand_bytes);
1583 
1584     // No expansion, now see if we want to shrink
1585   } else if (capacity_after_gc > maximum_desired_capacity) {
1586     // Capacity too large, compute shrinking size
1587     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1588     ergo_verbose4(ErgoHeapSizing,
1589                   "attempt heap shrinking",
1590                   ergo_format_reason("capacity higher than "
1591                                      "max desired capacity after Full GC")
1592                   ergo_format_byte("capacity")
1593                   ergo_format_byte("occupancy")
1594                   ergo_format_byte_perc("max desired capacity"),
1595                   capacity_after_gc, used_after_gc,
1596                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1597     shrink(shrink_bytes);
1598   }
1599 }
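
[Illustrative aside, not part of this file] The resize decision above clamps a desired-capacity band derived from the free-ratio flags and then expands or shrinks toward it. A standalone sketch of that arithmetic, assuming the band is computed as used / (1 - ratio); the concrete numbers are made up for illustration.

#include <algorithm>
#include <cstdio>

int main() {
  const double MinHeapFreeRatio = 40.0;   // hypothetical -XX:MinHeapFreeRatio value
  const double MaxHeapFreeRatio = 70.0;   // hypothetical -XX:MaxHeapFreeRatio value

  const double used_after_gc     = 700.0;   // MB live after the Full GC
  const double capacity_after_gc = 1024.0;  // MB currently committed
  const double min_heap_size     = 256.0;   // -Xms, MB
  const double max_heap_size     = 4096.0;  // -Xmx, MB

  // Keep at least MinHeapFreeRatio% free:  capacity >= used / (1 - 0.40)
  double minimum_desired_capacity = used_after_gc / (1.0 - MinHeapFreeRatio / 100.0);
  // Keep at most MaxHeapFreeRatio% free:   capacity <= used / (1 - 0.70)
  double maximum_desired_capacity = used_after_gc / (1.0 - MaxHeapFreeRatio / 100.0);

  // Clamp the band into [min_heap_size, max_heap_size], as the code above does.
  minimum_desired_capacity = std::min(minimum_desired_capacity, max_heap_size);
  maximum_desired_capacity = std::max(maximum_desired_capacity, min_heap_size);

  if (capacity_after_gc < minimum_desired_capacity) {
    std::printf("expand by %.0f MB\n", minimum_desired_capacity - capacity_after_gc);
  } else if (capacity_after_gc > maximum_desired_capacity) {
    std::printf("shrink by %.0f MB\n", capacity_after_gc - maximum_desired_capacity);
  } else {
    std::printf("capacity %.0f MB is within the desired band\n", capacity_after_gc);
  }
  return 0;
}
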
1600 
1601 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1602                                                             AllocationContext_t context,
1603                                                             bool do_gc,
1604                                                             bool clear_all_soft_refs,
1605                                                             bool expect_null_mutator_alloc_region,
1606                                                             bool* gc_succeeded) {
1607   *gc_succeeded = true;
1608   // Let's attempt the allocation first.
1609   HeapWord* result =
1610     attempt_allocation_at_safepoint(word_size,
1611                                     context,
1612                                     expect_null_mutator_alloc_region);
1613   if (result != NULL) {
1614     assert(*gc_succeeded, "sanity");
1615     return result;
1616   }


1682 
1683   // What else?  We might try synchronous finalization later.  If the total
1684   // space available is large enough for the allocation, then a more
1685   // complete compaction phase than we've tried so far might be
1686   // appropriate.
1687   assert(*succeeded, "sanity");
1688   return NULL;
1689 }
1690 
1691 // Attempt to expand the heap sufficiently to support an
1692 // allocation of the given "word_size". If successful, perform
1693 // the allocation and return the address of the allocated block,
1694 // or else "NULL".
1695 
1696 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1697   assert_at_safepoint(true /* should_be_vm_thread */);
1698 
1699   verify_region_sets_optional();
1700 
1701   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1702   ergo_verbose1(ErgoHeapSizing,
1703                 "attempt heap expansion",
1704                 ergo_format_reason("allocation request failed")
1705                 ergo_format_byte("allocation request"),
1706                 word_size * HeapWordSize);


1707   if (expand(expand_bytes)) {
1708     _hrm.verify_optional();
1709     verify_region_sets_optional();
1710     return attempt_allocation_at_safepoint(word_size,
1711                                            context,
1712                                            false /* expect_null_mutator_alloc_region */);
1713   }
1714   return NULL;
1715 }
1716 
1717 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1718   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1719   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1720                                        HeapRegion::GrainBytes);
1721   ergo_verbose2(ErgoHeapSizing,
1722                 "expand the heap",
1723                 ergo_format_byte("requested expansion amount")
1724                 ergo_format_byte("attempted expansion amount"),
1725                 expand_bytes, aligned_expand_bytes);
1726 
1727   if (is_maximal_no_gc()) {
1728     ergo_verbose0(ErgoHeapSizing,
1729                       "did not expand the heap",
1730                       ergo_format_reason("heap already fully expanded"));
1731     return false;
1732   }
1733 
1734   double expand_heap_start_time_sec = os::elapsedTime();
1735   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1736   assert(regions_to_expand > 0, "Must expand by at least one region");
1737 
1738   uint expanded_by = _hrm.expand_by(regions_to_expand);
1739   if (expand_time_ms != NULL) {
1740     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1741   }
1742 
1743   if (expanded_by > 0) {
1744     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1745     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1746     g1_policy()->record_new_heap_size(num_regions());
1747   } else {
1748     ergo_verbose0(ErgoHeapSizing,
1749                   "did not expand the heap",
1750                   ergo_format_reason("heap expansion operation failed"));
1751     // The expansion of the virtual storage space was unsuccessful.
1752     // Let's see if it was because we ran out of swap.
1753     if (G1ExitOnExpansionFailure &&
1754         _hrm.available() >= regions_to_expand) {
1755       // We had head room...
1756       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1757     }
1758   }
1759   return regions_to_expand > 0;
1760 }
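
[Illustrative aside, not part of this file] expand() rounds the request up twice, first to the page size and then to the region size, and converts the result into a region count. A small standalone sketch of that arithmetic using the usual power-of-two align-up identity; the sizes are hypothetical.

#include <cassert>
#include <cstddef>

static std::size_t align_up(std::size_t value, std::size_t alignment) {
  // alignment must be a power of two for this identity to hold
  assert((alignment & (alignment - 1)) == 0);
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const std::size_t page_size    = 4 * 1024;          // OS page size
  const std::size_t region_size  = 1 * 1024 * 1024;   // HeapRegion::GrainBytes analogue
  const std::size_t expand_bytes = 2500 * 1024;       // requested expansion

  std::size_t aligned = align_up(expand_bytes, page_size);
  aligned             = align_up(aligned, region_size);

  std::size_t regions_to_expand = aligned / region_size;   // 3 regions for 2500 KB
  assert(regions_to_expand == 3);
  return 0;
}
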
1761 
1762 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1763   size_t aligned_shrink_bytes =
1764     ReservedSpace::page_align_size_down(shrink_bytes);
1765   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1766                                          HeapRegion::GrainBytes);
1767   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1768 
1769   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1770   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1771 
1772   ergo_verbose3(ErgoHeapSizing,
1773                 "shrink the heap",
1774                 ergo_format_byte("requested shrinking amount")
1775                 ergo_format_byte("aligned shrinking amount")
1776                 ergo_format_byte("attempted shrinking amount"),
1777                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1778   if (num_regions_removed > 0) {
1779     g1_policy()->record_new_heap_size(num_regions());
1780   } else {
1781     ergo_verbose0(ErgoHeapSizing,
1782                   "did not shrink the heap",
1783                   ergo_format_reason("heap shrinking operation failed"));
1784   }
1785 }
1786 
1787 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1788   verify_region_sets_optional();
1789 
1790   // We should only reach here at the end of a Full GC, which means we
1791   // should not be holding on to any GC alloc regions. The method
1792   // below will make sure of that and do any remaining clean up.
1793   _allocator->abandon_gc_alloc_regions();
1794 
1795   // Instead of tearing down / rebuilding the free lists here, we
1796   // could use the remove_all_pending() method on free_list to
1797   // remove only the ones that we need to remove.
1798   tear_down_region_sets(true /* free_list_only */);
1799   shrink_helper(shrink_bytes);
1800   rebuild_region_sets(true /* free_list_only */);
1801 
1802   _hrm.verify_optional();
1803   verify_region_sets_optional();


1875   // Initialize the G1EvacuationFailureALot counters and flags.
1876   NOT_PRODUCT(reset_evacuation_should_fail();)
1877 
1878   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1879 }
1880 
1881 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1882                                                                  size_t size,
1883                                                                  size_t translation_factor) {
1884   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1885   // Allocate a new reserved space, preferring to use large pages.
1886   ReservedSpace rs(size, preferred_page_size);
1887   G1RegionToSpaceMapper* result  =
1888     G1RegionToSpaceMapper::create_mapper(rs,
1889                                          size,
1890                                          rs.alignment(),
1891                                          HeapRegion::GrainBytes,
1892                                          translation_factor,
1893                                          mtGC);
1894   if (TracePageSizes) {
1895     gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1896                            description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1897   }
1898   return result;
1899 }
1900 
1901 jint G1CollectedHeap::initialize() {
1902   CollectedHeap::pre_initialize();
1903   os::enable_vtime();
1904 
1905   G1Log::init();
1906 
1907   // Necessary to satisfy locking discipline assertions.
1908 
1909   MutexLocker x(Heap_lock);
1910 
1911   // We have to initialize the printer before committing the heap, as
1912   // it will be used then.
1913   _hr_printer.set_active(G1PrintHeapRegions);
1914 
1915   // While the GC code itself places no constraints on the value of
1916   // HeapWordSize, multiple other areas in the system assume that
1917   // HeapWordSize equals wordSize (e.g. oop->object_size in some
1918   // cases incorrectly returns the size in wordSize units rather than
1919   // HeapWordSize).
1920   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1921 
1922   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1923   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1924   size_t heap_alignment = collector_policy()->heap_alignment();
1925 
1926   // Ensure that the sizes are properly aligned.
1927   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1928   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1929   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1930 
1931   _refine_cte_cl = new RefineCardTableEntryClosure();
1932 
1933   jint ecode = JNI_OK;
1934   _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);


2087   G1AllocRegion::setup(this, dummy_region);
2088 
2089   _allocator->init_mutator_alloc_region();
2090 
2091   // Create the monitoring and management support here so that the
2092   // values in the heap have been properly initialized.
2093   _g1mm = new G1MonitoringSupport(this);
2094 
2095   G1StringDedup::initialize();
2096 
2097   _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2098   for (uint i = 0; i < ParallelGCThreads; i++) {
2099     new (&_preserved_objs[i]) OopAndMarkOopStack();
2100   }
2101 
2102   return JNI_OK;
2103 }
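
[Illustrative aside, not part of this file] The _preserved_objs initialization a few lines above allocates raw C-heap storage and then runs each constructor with placement new. A standalone sketch of that idiom, with a hypothetical PerThreadState in place of OopAndMarkOopStack.

#include <cstdlib>
#include <new>

struct PerThreadState {
  int depth;
  PerThreadState() : depth(0) {}
};

int main() {
  const unsigned n_threads = 4;   // stands in for ParallelGCThreads

  // NEW_C_HEAP_ARRAY analogue: raw, uninitialized memory from the C heap.
  PerThreadState* states =
      static_cast<PerThreadState*>(std::malloc(n_threads * sizeof(PerThreadState)));
  if (states == nullptr) return 1;

  // Run the constructor in place on each slot, like
  // `new (&_preserved_objs[i]) OopAndMarkOopStack()` above.
  for (unsigned i = 0; i < n_threads; i++) {
    new (&states[i]) PerThreadState();
  }

  // A full teardown would call destructors before freeing; elided here
  // because PerThreadState is trivially destructible.
  std::free(states);
  return 0;
}
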
2104 
2105 void G1CollectedHeap::stop() {
2106   // Stop all concurrent threads. We do this to make sure these threads
2107   // do not continue to execute and access resources (e.g. gclog_or_tty)
2108   // that are destroyed during shutdown.
2109   _cg1r->stop();
2110   _cmThread->stop();
2111   if (G1StringDedup::is_enabled()) {
2112     G1StringDedup::stop();
2113   }
2114 }
2115 
2116 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2117   return HeapRegion::max_region_size();
2118 }
2119 
2120 void G1CollectedHeap::post_initialize() {
2121   CollectedHeap::post_initialize();
2122   ref_processing_init();
2123 }
2124 
2125 void G1CollectedHeap::ref_processing_init() {
2126   // Reference processing in G1 currently works as follows:
2127   //


2204 }
2205 
2206 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2207   hr->reset_gc_time_stamp();
2208 }
2209 
2210 #ifndef PRODUCT
2211 
2212 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2213 private:
2214   unsigned _gc_time_stamp;
2215   bool _failures;
2216 
2217 public:
2218   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2219     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2220 
2221   virtual bool doHeapRegion(HeapRegion* hr) {
2222     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2223     if (_gc_time_stamp != region_gc_time_stamp) {
2224       gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, "
2225                              "expected %d", HR_FORMAT_PARAMS(hr),
2226                              region_gc_time_stamp, _gc_time_stamp);
2227       _failures = true;
2228     }
2229     return false;
2230   }
2231 
2232   bool failures() { return _failures; }
2233 };
2234 
2235 void G1CollectedHeap::check_gc_time_stamps() {
2236   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2237   heap_region_iterate(&cl);
2238   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2239 }
2240 #endif // PRODUCT
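
[Illustrative aside, not part of this file] CheckGCTimeStampsHRClosure follows the closure/visitor protocol used throughout this file: heap_region_iterate() calls doHeapRegion() on every region and, as far as these closures suggest, a true return value ends the walk early (treat that convention as an assumption). A standalone sketch with hypothetical names:

#include <cstdio>
#include <vector>

struct Region { unsigned index; unsigned time_stamp; };

struct RegionClosure {
  virtual bool doHeapRegion(Region* r) = 0;
  virtual ~RegionClosure() {}
};

struct CheckTimeStampClosure : public RegionClosure {
  unsigned _expected;
  bool     _failures;
  explicit CheckTimeStampClosure(unsigned expected)
    : _expected(expected), _failures(false) {}

  bool doHeapRegion(Region* r) override {
    if (r->time_stamp != _expected) {
      std::printf("region %u has stamp %u, expected %u\n",
                  r->index, r->time_stamp, _expected);
      _failures = true;
    }
    return false;   // keep iterating over all regions
  }
};

static void region_iterate(std::vector<Region>& regions, RegionClosure* cl) {
  for (Region& r : regions) {
    if (cl->doHeapRegion(&r)) {
      return;       // a closure returning true stops the walk (assumed convention)
    }
  }
}

int main() {
  std::vector<Region> heap = { {0, 7}, {1, 7}, {2, 5} };
  CheckTimeStampClosure cl(7);
  region_iterate(heap, &cl);
  return cl._failures ? 1 : 0;
}
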
2241 
2242 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2243   _cg1r->hot_card_cache()->drain(cl, worker_i);
2244 }
2245 


2799 private:
2800   G1CollectedHeap* _g1h;
2801   VerifyOption     _vo;
2802   bool             _failures;
2803 public:
2804   // _vo == UsePrevMarking -> use "prev" marking information,
2805   // _vo == UseNextMarking -> use "next" marking information,
2806   // _vo == UseMarkWord    -> use mark word from object header.
2807   VerifyRootsClosure(VerifyOption vo) :
2808     _g1h(G1CollectedHeap::heap()),
2809     _vo(vo),
2810     _failures(false) { }
2811 
2812   bool failures() { return _failures; }
2813 
2814   template <class T> void do_oop_nv(T* p) {
2815     T heap_oop = oopDesc::load_heap_oop(p);
2816     if (!oopDesc::is_null(heap_oop)) {
2817       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2818       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2819         gclog_or_tty->print_cr("Root location " PTR_FORMAT " "
2820                                "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2821         if (_vo == VerifyOption_G1UseMarkWord) {
2822           gclog_or_tty->print_cr("  Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark());
2823         }
2824         obj->print_on(gclog_or_tty);

2825         _failures = true;
2826       }
2827     }
2828   }
2829 
2830   void do_oop(oop* p)       { do_oop_nv(p); }
2831   void do_oop(narrowOop* p) { do_oop_nv(p); }
2832 };
2833 
2834 class G1VerifyCodeRootOopClosure: public OopClosure {
2835   G1CollectedHeap* _g1h;
2836   OopClosure* _root_cl;
2837   nmethod* _nm;
2838   VerifyOption _vo;
2839   bool _failures;
2840 
2841   template <class T> void do_oop_work(T* p) {
2842     // First verify that this root is live
2843     _root_cl->do_oop(p);
2844 


2849 
2850     // Don't check the code roots during marking verification in a full GC
2851     if (_vo == VerifyOption_G1UseMarkWord) {
2852       return;
2853     }
2854 
2855     // Now verify that the current nmethod (which contains p) is
2856     // in the code root list of the heap region containing the
2857     // object referenced by p.
2858 
2859     T heap_oop = oopDesc::load_heap_oop(p);
2860     if (!oopDesc::is_null(heap_oop)) {
2861       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2862 
2863       // Now fetch the region containing the object
2864       HeapRegion* hr = _g1h->heap_region_containing(obj);
2865       HeapRegionRemSet* hrrs = hr->rem_set();
2866       // Verify that the strong code root list for this region
2867       // contains the nmethod
2868       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2869         gclog_or_tty->print_cr("Code root location " PTR_FORMAT " "
2870                                "from nmethod " PTR_FORMAT " not in strong "
2871                                "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2872                                p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2873         _failures = true;
2874       }
2875     }
2876   }
2877 
2878 public:
2879   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2880     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2881 
2882   void do_oop(oop* p) { do_oop_work(p); }
2883   void do_oop(narrowOop* p) { do_oop_work(p); }
2884 
2885   void set_nmethod(nmethod* nm) { _nm = nm; }
2886   bool failures() { return _failures; }
2887 };
2888 
2889 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {


3030   }
3031 
3032   bool doHeapRegion(HeapRegion* r) {
3033     // For archive regions, verify there are no heap pointers to
3034     // non-pinned regions. For all others, verify liveness info.
3035     if (r->is_archive()) {
3036       VerifyArchiveRegionClosure verify_oop_pointers(r);
3037       r->object_iterate(&verify_oop_pointers);
3038       return true;
3039     }
3040     if (!r->is_continues_humongous()) {
3041       bool failures = false;
3042       r->verify(_vo, &failures);
3043       if (failures) {
3044         _failures = true;
3045       } else if (!r->is_starts_humongous()) {
3046         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3047         r->object_iterate(&not_dead_yet_cl);
3048         if (_vo != VerifyOption_G1UseNextMarking) {
3049           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3050             gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] "
3051                                    "max_live_bytes " SIZE_FORMAT " "
3052                                    "< calculated " SIZE_FORMAT,
3053                                    p2i(r->bottom()), p2i(r->end()),
3054                                    r->max_live_bytes(),
3055                                  not_dead_yet_cl.live_bytes());
3056             _failures = true;
3057           }
3058         } else {
3059           // When vo == UseNextMarking we cannot currently do a sanity
3060           // check on the live bytes as the calculation has not been
3061           // finalized yet.
3062         }
3063       }
3064     }
3065     return false; // keep iterating; any failure has been recorded in _failures
3066   }
3067 };
3068 
3069 // This is the task used for parallel verification of the heap regions
3070 
3071 class G1ParVerifyTask: public AbstractGangTask {
3072 private:
3073   G1CollectedHeap*  _g1h;
3074   VerifyOption      _vo;
3075   bool              _failures;


3083       AbstractGangTask("Parallel verify task"),
3084       _g1h(g1h),
3085       _vo(vo),
3086       _failures(false),
3087       _hrclaimer(g1h->workers()->active_workers()) {}
3088 
3089   bool failures() {
3090     return _failures;
3091   }
3092 
3093   void work(uint worker_id) {
3094     HandleMark hm;
3095     VerifyRegionClosure blk(true, _vo);
3096     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3097     if (blk.failures()) {
3098       _failures = true;
3099     }
3100   }
3101 };
3102 
3103 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3104   if (SafepointSynchronize::is_at_safepoint()) {
3105     assert(Thread::current()->is_VM_thread(),
3106            "Expected to be executed serially by the VM thread at this point");
3107 
3108     if (!silent) { gclog_or_tty->print("Roots "); }
3109     VerifyRootsClosure rootsCl(vo);
3110     VerifyKlassClosure klassCl(this, &rootsCl);
3111     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3112 
3113     // We apply the relevant closures to all the oops in the
3114     // system dictionary, class loader data graph, the string table
3115     // and the nmethods in the code cache.
3116     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3117     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3118 
3119     {
3120       G1RootProcessor root_processor(this, 1);
3121       root_processor.process_all_roots(&rootsCl,
3122                                        &cldCl,
3123                                        &blobsCl);
3124     }
3125 
3126     bool failures = rootsCl.failures() || codeRootsCl.failures();
3127 
3128     if (vo != VerifyOption_G1UseMarkWord) {
3129       // If we're verifying during a full GC then the region sets
3130       // will have been torn down at the start of the GC. Therefore
3131       // verifying the region sets will fail. So we only verify
3132       // the region sets when not in a full GC.
3133       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3134       verify_region_sets();
3135     }
3136 
3137     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3138     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3139 
3140       G1ParVerifyTask task(this, vo);
3141       workers()->run_task(&task);
3142       if (task.failures()) {
3143         failures = true;
3144       }
3145 
3146     } else {
3147       VerifyRegionClosure blk(false, vo);
3148       heap_region_iterate(&blk);
3149       if (blk.failures()) {
3150         failures = true;
3151       }
3152     }
3153 
3154     if (G1StringDedup::is_enabled()) {
3155       if (!silent) gclog_or_tty->print("StrDedup ");
3156       G1StringDedup::verify();
3157     }
3158 
3159     if (failures) {
3160       gclog_or_tty->print_cr("Heap:");
3161       // Having the per-region information in the output helps us
3162       // track down what went wrong. This is why we call
3163       // print_extended_on() instead of print_on().
3164       print_extended_on(gclog_or_tty);
3165       gclog_or_tty->cr();
3166       gclog_or_tty->flush();
3167     }
3168     guarantee(!failures, "there should not have been any failures");
3169   } else {
3170     if (!silent) {
3171       gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3172       if (G1StringDedup::is_enabled()) {
3173         gclog_or_tty->print(", StrDedup");
3174       }
3175       gclog_or_tty->print(") ");
3176     }
3177   }
3178 }
3179 
3180 void G1CollectedHeap::verify(bool silent) {
3181   verify(silent, VerifyOption_G1UsePrevMarking);
3182 }
3183 
3184 double G1CollectedHeap::verify(bool guard, const char* msg) {
3185   double verify_time_ms = 0.0;
3186 
3187   if (guard && total_collections() >= VerifyGCStartAt) {
3188     double verify_start = os::elapsedTime();
3189     HandleMark hm;  // Discard invalid handles created during verification
3190     prepare_for_verify();
3191     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3192     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3193   }
3194 
3195   return verify_time_ms;
3196 }
3197 
3198 void G1CollectedHeap::verify_before_gc() {
3199   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3200   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3201 }
3202 
3203 void G1CollectedHeap::verify_after_gc() {
3204   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3205   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3206 }
3207 
3208 class PrintRegionClosure: public HeapRegionClosure {
3209   outputStream* _st;
3210 public:
3211   PrintRegionClosure(outputStream* st) : _st(st) {}
3212   bool doHeapRegion(HeapRegion* r) {
3213     r->print_on(_st);
3214     return false;
3215   }
3216 };
3217 
3218 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3219                                        const HeapRegion* hr,
3220                                        const VerifyOption vo) const {
3221   switch (vo) {
3222   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3223   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3224   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();


3294     G1StringDedup::print_worker_threads_on(st);
3295   }
3296 }
3297 
3298 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3299   workers()->threads_do(tc);
3300   tc->do_thread(_cmThread);
3301   _cg1r->threads_do(tc);
3302   if (G1StringDedup::is_enabled()) {
3303     G1StringDedup::threads_do(tc);
3304   }
3305 }
3306 
3307 void G1CollectedHeap::print_tracing_info() const {
3308   // We'll overload this to mean "trace GC pause statistics."
3309   if (TraceYoungGenTime || TraceOldGenTime) {
3310     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3311     // to that.
3312     g1_policy()->print_tracing_info();
3313   }
3314   if (G1SummarizeRSetStats) {
3315     g1_rem_set()->print_summary_info();
3316   }
3317   if (G1SummarizeConcMark) {
3318     concurrent_mark()->print_summary_info();
3319   }
3320   g1_policy()->print_yg_surv_rate_info();
3321 }
3322 
3323 #ifndef PRODUCT
3324 // Helpful for debugging RSet issues.
3325 
3326 class PrintRSetsClosure : public HeapRegionClosure {
3327 private:
3328   const char* _msg;
3329   size_t _occupied_sum;
3330 
3331 public:
3332   bool doHeapRegion(HeapRegion* r) {
3333     HeapRegionRemSet* hrrs = r->rem_set();
3334     size_t occupied = hrrs->occupied();
3335     _occupied_sum += occupied;
3336 
3337     gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT,
3338                            HR_FORMAT_PARAMS(r));
3339     if (occupied == 0) {
3340       gclog_or_tty->print_cr("  RSet is empty");
3341     } else {
3342       hrrs->print();
3343     }
3344     gclog_or_tty->print_cr("----------");
3345     return false;
3346   }
3347 
3348   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3349     gclog_or_tty->cr();
3350     gclog_or_tty->print_cr("========================================");
3351     gclog_or_tty->print_cr("%s", msg);
3352     gclog_or_tty->cr();
3353   }
3354 
3355   ~PrintRSetsClosure() {
3356     gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3357     gclog_or_tty->print_cr("========================================");
3358     gclog_or_tty->cr();
3359   }
3360 };
3361 
3362 void G1CollectedHeap::print_cset_rsets() {
3363   PrintRSetsClosure cl("Printing CSet RSets");
3364   collection_set_iterate(&cl);
3365 }
3366 
3367 void G1CollectedHeap::print_all_rsets() {
3368   PrintRSetsClosure cl("Printing All RSets");
3369   heap_region_iterate(&cl);
3370 }
3371 #endif // PRODUCT
3372 
3373 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3374   YoungList* young_list = heap()->young_list();
3375 
3376   size_t eden_used_bytes = young_list->eden_used_bytes();
3377   size_t survivor_used_bytes = young_list->survivor_used_bytes();
3378 


3396 
3397   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3398   gc_tracer->report_metaspace_summary(when, metaspace_summary);
3399 }
3400 
3401 
3402 G1CollectedHeap* G1CollectedHeap::heap() {
3403   CollectedHeap* heap = Universe::heap();
3404   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3405   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3406   return (G1CollectedHeap*)heap;
3407 }
3408 
3409 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3410   // always_do_update_barrier = false;
3411   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3412   // Fill TLAB's and such
3413   accumulate_statistics_all_tlabs();
3414   ensure_parsability(true);
3415 
3416   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3417       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3418     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3419   }
3420 }
3421 
3422 void G1CollectedHeap::gc_epilogue(bool full) {
3423 
3424   if (G1SummarizeRSetStats &&
3425       (G1SummarizeRSetStatsPeriod > 0) &&
3426       // We are at the end of the GC; the total collections counter has already been incremented.
3427       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3428     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3429   }
3430 
3431   // FIXME: what is this about?
3432   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3433   // is set.
3434 #if defined(COMPILER2) || INCLUDE_JVMCI
3435   assert(DerivedPointerTable::is_empty(), "derived pointer present");
3436 #endif
3437   // always_do_update_barrier = true;
3438 
3439   resize_all_tlabs();
3440   allocation_context_stats().update(full);
3441 
3442   // We have just completed a GC. Update the soft reference
3443   // policy with the new heap occupancy
3444   Universe::update_heap_info_at_gc();
3445 }
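
[Illustrative aside, not part of this file] The periodic-summary test in gc_epilogue() subtracts one before the modulo because total_collections() has already been incremented by the time the epilogue runs, while gc_prologue() sees the pre-increment value. A tiny standalone check of that off-by-one reasoning (the period is arbitrary):

#include <cassert>

int main() {
  const unsigned period = 4;
  unsigned total_collections = 0;

  for (unsigned gc = 0; gc < 12; gc++) {
    // prologue: the counter still holds the pre-increment value
    bool print_before = (total_collections % period == 0);

    total_collections++;   // the pause itself increments the counter

    // epilogue: the same GC must map to the same slot, hence the -1
    bool print_after = ((total_collections - 1) % period == 0);

    assert(print_before == print_after);
  }
  return 0;
}
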
3446 
3447 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3448                                                uint gc_count_before,
3449                                                bool* succeeded,


3655     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3656 
3657     // Here's a good place to add any other checks we'd like to
3658     // perform on CSet regions.
3659     return false;
3660   }
3661 };
3662 #endif // ASSERT
3663 
3664 uint G1CollectedHeap::num_task_queues() const {
3665   return _task_queues->size();
3666 }
3667 
3668 #if TASKQUEUE_STATS
3669 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3670   st->print_raw_cr("GC Task Stats");
3671   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3672   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3673 }
3674 
3675 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3676   print_taskqueue_stats_hdr(st);
3677 
3678   TaskQueueStats totals;
3679   const uint n = num_task_queues();
3680   for (uint i = 0; i < n; ++i) {
3681     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3682     totals += task_queue(i)->stats;
3683   }
3684   st->print_raw("tot "); totals.print(st); st->cr();
3685 
3686   DEBUG_ONLY(totals.verify());
3687 }
3688 
3689 void G1CollectedHeap::reset_taskqueue_stats() {
3690   const uint n = num_task_queues();
3691   for (uint i = 0; i < n; ++i) {
3692     task_queue(i)->stats.reset();
3693   }
3694 }
3695 #endif // TASKQUEUE_STATS
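
[Illustrative aside, not part of this file] print_taskqueue_stats() prints one row per worker queue and accumulates them into a totals row via TaskQueueStats::operator+=. A standalone sketch of that aggregation with hypothetical counter fields:

#include <cstddef>
#include <cstdio>
#include <vector>

struct QueueStats {
  unsigned long pushes;
  unsigned long pops;
  unsigned long steals;

  QueueStats(unsigned long pu = 0, unsigned long po = 0, unsigned long st = 0)
    : pushes(pu), pops(po), steals(st) {}

  QueueStats& operator+=(const QueueStats& o) {
    pushes += o.pushes; pops += o.pops; steals += o.steals;
    return *this;
  }

  void print(const char* label) const {
    std::printf("%s push=%lu pop=%lu steal=%lu\n", label, pushes, pops, steals);
  }
};

int main() {
  std::vector<QueueStats> per_thread = {
    QueueStats(10, 9, 1), QueueStats(12, 12, 0), QueueStats(7, 8, 3)
  };

  QueueStats totals;
  for (std::size_t i = 0; i < per_thread.size(); ++i) {
    char label[16];
    std::snprintf(label, sizeof(label), "%3zu", i);
    per_thread[i].print(label);   // one row per worker queue
    totals += per_thread[i];      // accumulate into the totals row
  }
  totals.print("tot");
  return 0;
}
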
3696 
3697 void G1CollectedHeap::log_gc_header() {
3698   if (!G1Log::fine()) {
3699     return;
3700   }
3701 
3702   gclog_or_tty->gclog_stamp();
3703 
3704   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3705     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3706     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3707 
3708   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3709 }
3710 
3711 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3712   if (!G1Log::fine()) {
3713     return;
3714   }
3715 
3716   if (G1Log::finer()) {
3717     if (evacuation_failed()) {
3718       gclog_or_tty->print(" (to-space exhausted)");
3719     }
3720     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);

3721     g1_policy()->print_phases(pause_time_sec);

3722     g1_policy()->print_detailed_heap_transition();
3723   } else {
3724     if (evacuation_failed()) {
3725       gclog_or_tty->print("--");
3726     }
3727     g1_policy()->print_heap_transition();
3728     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3729   }
3730   gclog_or_tty->flush();
3731 }
3732 

3733 void G1CollectedHeap::wait_for_root_region_scanning() {
3734   double scan_wait_start = os::elapsedTime();
3735   // We have to wait until the CM threads finish scanning the
3736   // root regions as it's the only way to ensure that all the
3737   // objects on them have been correctly scanned before we start
3738   // moving them during the GC.
3739   bool waited = _cm->root_regions()->wait_until_scan_finished();
3740   double wait_time_ms = 0.0;
3741   if (waited) {
3742     double scan_wait_end = os::elapsedTime();
3743     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3744   }
3745   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3746 }
3747 
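     // Perform an evacuation pause at a safepoint, aiming for the given pause time target.
     // Returns false if the pause could not be started, e.g. because the GC locker is active.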
3748 bool
3749 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3750   assert_at_safepoint(true /* should_be_vm_thread */);
3751   guarantee(!is_gc_active(), "collection is not reentrant");
3752 
3753   if (GC_locker::check_active_before_gc()) {
3754     return false;
3755   }
3756 
3757   _gc_timer_stw->register_gc_start();
3758 
3759   GCIdMark gc_id_mark;
3760   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3761 
3762   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3763   ResourceMark rm;
3764 
3765   wait_for_root_region_scanning();
3766 
3767   G1Log::update_level();
3768   print_heap_before_gc();
3769   trace_heap_before_gc(_gc_tracer_stw);
3770 
3771   verify_region_sets_optional();
3772   verify_dirty_young_regions();
3773 
3774   // This call will decide whether this pause is an initial-mark
3775   // pause. If it is, during_initial_mark_pause() will return true
3776   // for the duration of this pause.
3777   g1_policy()->decide_on_conc_mark_initiation();
3778 
3779   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3780   assert(!collector_state()->during_initial_mark_pause() ||
3781           collector_state()->gcs_are_young(), "sanity");
3782 
3783   // We also do not allow mixed GCs during marking.
3784   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3785 
3786   // Record whether this pause is an initial mark. By the time the current
3787   // thread has completed its logging output and it is safe to signal
3788   // the CM thread, the flag's value in the policy will have been reset.
3789   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3790 
3791   // Inner scope for scope based logging, timers, and stats collection
3792   {
3793     EvacuationInfo evacuation_info;
3794 
3795     if (collector_state()->during_initial_mark_pause()) {
3796       // We are about to start a marking cycle, so we increment the
3797       // counter of started old marking cycles.
3798       increment_old_marking_cycles_started();
3799       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3800     }
3801 
3802     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3803 
3804     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3805 
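         // Recompute the number of parallel GC workers to use for this pause from the
         // total worker count, the current active count and the number of non-daemon threads.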
3806     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3807                                                                   workers()->active_workers(),
3808                                                                   Threads::number_of_non_daemon_threads());
3809     workers()->set_active_workers(active_workers);









3810 
3811     double pause_start_sec = os::elapsedTime();

3812     g1_policy()->note_gc_start(active_workers);
3813     log_gc_header();
3814 
3815     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3816     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3817 
3818     // If the secondary_free_list is not empty, append it to the
3819     // free_list. No need to wait for the cleanup operation to finish;
3820     // the region allocation code will check the secondary_free_list
3821     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3822     // set, skip this step so that the region allocation code has to
3823     // get entries from the secondary_free_list.
3824     if (!G1StressConcRegionFreeing) {
3825       append_secondary_free_list_if_not_empty_with_lock();
3826     }
3827 
3828     assert(check_young_list_well_formed(), "young list should be well formed");
3829 
3830     // Don't dynamically change the number of GC threads this early.  A value of
3831     // 0 is used to indicate serial work.  When parallel work is done,
3832     // it will be set.
3833 


3851       // reference processing currently works in G1.
3852 
3853       // Enable discovery in the STW reference processor
3854       if (g1_policy()->should_process_references()) {
3855         ref_processor_stw()->enable_discovery();
3856       } else {
3857         ref_processor_stw()->disable_discovery();
3858       }
3859 
3860       {
3861         // We want to temporarily turn off discovery by the
3862         // CM ref processor, if necessary, and turn it back on
3863         // again later if we do. Using a scoped
3864         // NoRefDiscovery object will do this.
3865         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3866 
3867         // Forget the current alloc region (we might even choose it to be part
3868         // of the collection set!).
3869         _allocator->release_mutator_alloc_region();
3870 
3871         // We should call this after we retire the mutator alloc
3872         // region(s) so that all the ALLOC / RETIRE events are generated
3873         // before the start GC event.
3874         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3875 
3876         // This timing is only used by the ergonomics to handle our pause target.
3877         // It is unclear why this should not include the full pause. We will
3878         // investigate this in CR 7178365.
3879         //
3880         // Preserving the old comment here if that helps the investigation:
3881         //
3882         // The elapsed time induced by the start time below deliberately elides
3883         // the possible verification above.
3884         double sample_start_time_sec = os::elapsedTime();
3885 
3886         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3887 
3888         if (collector_state()->during_initial_mark_pause()) {
3889           concurrent_mark()->checkpointRootsInitialPre();
3890         }
3891 
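             // Finalize the collection set: first the young part within the pause time
             // target, then the old part with whatever time remains.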
3892         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3893         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3894 
3895         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());


3979 
3980         if (collector_state()->during_initial_mark_pause()) {
3981           // We have to do this before we notify the CM threads that
3982           // they can start working to make sure that all the
3983           // appropriate initialization is done on the CM object.
3984           concurrent_mark()->checkpointRootsInitialPost();
3985           collector_state()->set_mark_in_progress(true);
3986           // Note that we don't actually trigger the CM thread at
3987           // this point. We do that later when we're sure that
3988           // the current thread has completed its logging output.
3989         }
3990 
3991         allocate_dummy_regions();
3992 
3993         _allocator->init_mutator_alloc_region();
3994 
3995         {
3996           size_t expand_bytes = g1_policy()->expansion_amount();
3997           if (expand_bytes > 0) {
3998             size_t bytes_before = capacity();
3999             // No need for an ergo verbose message here;
4000             // expansion_amount() does this when it returns a value > 0.
4001             double expand_ms;
4002             if (!expand(expand_bytes, &expand_ms)) {
4003               // We failed to expand the heap. Cannot do anything about it.
4004             }
4005             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
4006           }
4007         }
4008 
4009         // We redo the verification, but now with respect to the new CSet which
4010         // has just been initialized after the previous CSet was freed.
4011         _cm->verify_no_cset_oops();
4012         _cm->note_end_of_gc();
4013 
4014         // This timing is only used by the ergonomics to handle our pause target.
4015         // It is unclear why this should not include the full pause. We will
4016         // investigate this in CR 7178365.
4017         double sample_end_time_sec = os::elapsedTime();
4018         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4019         size_t total_cards_scanned = per_thread_states.total_cards_scanned();


4039         // stamp here we invalidate all the GC time stamps on all the
4040         // regions and saved_mark_word() will simply return top() for
4041         // all the regions. This is a nicer way of ensuring this rather
4042         // than iterating over the regions and fixing them. In fact, the
4043         // GC time stamp increment here also ensures that
4044         // saved_mark_word() will return top() between pauses, i.e.,
4045         // during concurrent refinement. So we don't need the
4046         // is_gc_active() check to decide which top to use when
4047         // scanning cards (see CR 7039627).
4048         increment_gc_time_stamp();
4049 
4050         verify_after_gc();
4051         check_bitmaps("GC End");
4052 
4053         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4054         ref_processor_stw()->verify_no_references_recorded();
4055 
4056         // CM reference discovery will be re-enabled if necessary.
4057       }
4058 
4059       // We should do this after we potentially expand the heap so
4060       // that all the COMMIT events are generated before the end GC
4061       // event, and after we retire the GC alloc regions so that all
4062       // RETIRE events are generated before the end GC event.
4063       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4064 
4065 #ifdef TRACESPINNING
4066       ParallelTaskTerminator::print_termination_counts();
4067 #endif
4068 
4069       gc_epilogue(false);
4070     }
4071 
4072     // Print the remainder of the GC log output.
4073     log_gc_footer(os::elapsedTime() - pause_start_sec);
4074 
4075     // It is not yet safe to tell the concurrent mark to
4076     // start as we have some optional output below. We don't want the
4077     // output from the concurrent mark thread interfering with this
4078     // logging output either.
4079 
4080     _hrm.verify_optional();
4081     verify_region_sets_optional();
4082 
4083     TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4084     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4085 
4086     print_heap_after_gc();
4087     trace_heap_after_gc(_gc_tracer_stw);
4088 
4089     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4090     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4091     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4092     // before any GC notifications are raised.
4093     g1mm()->update_sizes();
4094 
4095     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4096     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4097     _gc_timer_stw->register_gc_end();
4098     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4099   }
4100   // It should now be safe to tell the concurrent mark thread to start
4101   // without its logging output interfering with the logging output
4102   // that came from the pause.
4103 


4218 
4219       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4220 
4221       double term_sec = 0.0;
4222       size_t evac_term_attempts = 0;
4223       {
4224         double start = os::elapsedTime();
4225         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4226         evac.do_void();
4227 
4228         evac_term_attempts = evac.term_attempts();
4229         term_sec = evac.term_time();
4230         double elapsed_sec = os::elapsedTime() - start;
4231         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4232         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4233         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4234       }
4235 
4236       assert(pss->queue_is_empty(), "should be empty");
4237 
4238       if (PrintTerminationStats) {
4239         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4240         size_t lab_waste;
4241         size_t lab_undo_waste;
4242         pss->waste(lab_waste, lab_undo_waste);
4243         _g1h->print_termination_stats(gclog_or_tty,
4244                                       worker_id,
4245                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
4246                                       strong_roots_sec * 1000.0,                  /* strong roots time */
4247                                       term_sec * 1000.0,                          /* evac term time */
4248                                       evac_term_attempts,                         /* evac term attempts */
4249                                       lab_waste,                                  /* alloc buffer waste */
4250                                       lab_undo_waste                              /* undo waste */
4251                                       );
4252       }
4253 
4254       // Close the inner scope so that the ResourceMark and HandleMark
4255       // destructors are executed here and are included as part of the
4256       // "GC Worker Time".
4257     }
4258     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4259   }
4260 };
4261 
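     // Per-worker termination statistics, printed when PrintTerminationStats is enabled.
     // Times are reported in ms, waste in KiB.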
4262 void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
4263   st->print_raw_cr("GC Termination Stats");
4264   st->print_raw_cr("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
4265   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts  total   alloc    undo");
4266   st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4267 }
4268 
4269 void G1CollectedHeap::print_termination_stats(outputStream* const st,
4270                                               uint worker_id,
4271                                               double elapsed_ms,
4272                                               double strong_roots_ms,
4273                                               double term_ms,
4274                                               size_t term_attempts,
4275                                               size_t alloc_buffer_waste,
4276                                               size_t undo_waste) const {
4277   st->print_cr("%3d %9.2f %9.2f %6.2f "

4278                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4279                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4280                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4281                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4282                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4283                alloc_buffer_waste * HeapWordSize / K,
4284                undo_waste * HeapWordSize / K);
4285 }
4286 
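     // Parallel task that unlinks dead entries from the string and symbol tables.
     // Workers claim chunks of each table through the tables' parallel claimed index
     // and accumulate the processed/removed counts atomically.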
4287 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4288 private:
4289   BoolObjectClosure* _is_alive;
4290   int _initial_string_table_size;
4291   int _initial_symbol_table_size;
4292 
4293   bool  _process_strings;
4294   int _strings_processed;
4295   int _strings_removed;
4296 
4297   bool  _process_symbols;


4306     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4307 
4308     _initial_string_table_size = StringTable::the_table()->table_size();
4309     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4310     if (process_strings) {
4311       StringTable::clear_parallel_claimed_index();
4312     }
4313     if (process_symbols) {
4314       SymbolTable::clear_parallel_claimed_index();
4315     }
4316   }
4317 
4318   ~G1StringSymbolTableUnlinkTask() {
4319     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4320               "claim value %d after unlink less than initial string table size %d",
4321               StringTable::parallel_claimed_index(), _initial_string_table_size);
4322     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4323               "claim value %d after unlink less than initial symbol table size %d",
4324               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4325 
4326     if (G1TraceStringSymbolTableScrubbing) {
4327       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4328                              "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4329                              "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4330                              strings_processed(), strings_removed(),
4331                              symbols_processed(), symbols_removed());
4332     }
4333   }
4334 
4335   void work(uint worker_id) {
4336     int strings_processed = 0;
4337     int strings_removed = 0;
4338     int symbols_processed = 0;
4339     int symbols_removed = 0;
4340     if (_process_strings) {
4341       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4342       Atomic::add(strings_processed, &_strings_processed);
4343       Atomic::add(strings_removed, &_strings_removed);
4344     }
4345     if (_process_symbols) {
4346       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4347       Atomic::add(symbols_processed, &_symbols_processed);
4348       Atomic::add(symbols_removed, &_symbols_removed);
4349     }
4350   }
4351 
4352   size_t strings_processed() const { return (size_t)_strings_processed; }
4353   size_t strings_removed()   const { return (size_t)_strings_removed; }


5152   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5153 }
5154 
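     // Evacuate the collection set in parallel: set up root processing, run G1ParTask on
     // all active workers, and record the parallel time and the code root fixup time.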
5155 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5156   // Should G1EvacuationFailureALot be in effect for this GC?
5157   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5158 
5159   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5160   double start_par_time_sec = os::elapsedTime();
5161   double end_par_time_sec;
5162 
5163   {
5164     const uint n_workers = workers()->active_workers();
5165     G1RootProcessor root_processor(this, n_workers);
5166     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5167     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5168     if (collector_state()->during_initial_mark_pause()) {
5169       ClassLoaderDataGraph::clear_claimed_marks();
5170     }
5171 
5172     // The individual threads will set their evac-failure closures.
5173     if (PrintTerminationStats) {
5174       print_termination_stats_hdr(gclog_or_tty);
5175     }
5176 
5177     workers()->run_task(&g1_par_task);
5178     end_par_time_sec = os::elapsedTime();
5179 
5180     // Closing the inner scope will execute the destructor
5181     // for the G1RootProcessor object. We record the current
5182     // elapsed time before closing the scope so that time
5183     // taken for the destructor is NOT included in the
5184     // reported parallel time.
5185   }
5186 
5187   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5188 
5189   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5190   phase_times->record_par_time(par_time_ms);
5191 
5192   double code_root_fixup_time_ms =
5193         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5194   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5195 }


5394   }
5395 }
5396 
5397 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5398   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5399   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5400     verify_dirty_region(hr);
5401   }
5402 }
5403 
5404 void G1CollectedHeap::verify_dirty_young_regions() {
5405   verify_dirty_young_list(_young_list->first_region());
5406 }
5407 
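     // Returns true if the given bitmap has no bit set in the range [tams, end);
     // otherwise prints the offending address and returns false.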
5408 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5409                                                HeapWord* tams, HeapWord* end) {
5410   guarantee(tams <= end,
5411             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5412   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5413   if (result < end) {
5414     gclog_or_tty->cr();
5415     gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
5416                            bitmap_name, p2i(result));
5417     gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
5418                            bitmap_name, p2i(tams), p2i(end));
5419     return false;
5420   }
5421   return true;
5422 }
5423 
5424 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5425   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5426   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5427 
5428   HeapWord* bottom = hr->bottom();
5429   HeapWord* ptams  = hr->prev_top_at_mark_start();
5430   HeapWord* ntams  = hr->next_top_at_mark_start();
5431   HeapWord* end    = hr->end();
5432 
5433   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5434 
5435   bool res_n = true;
5436   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5437   // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
5438   // if we happen to be in that state.
5439   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5440     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5441   }
5442   if (!res_p || !res_n) {
5443     gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
5444                            HR_FORMAT_PARAMS(hr));
5445     gclog_or_tty->print_cr("#### Caller: %s", caller);
5446     return false;
5447   }
5448   return true;
5449 }
5450 
5451 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5452   if (!G1VerifyBitmaps) return;
5453 
5454   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5455 }
5456 
5457 class G1VerifyBitmapClosure : public HeapRegionClosure {
5458 private:
5459   const char* _caller;
5460   G1CollectedHeap* _g1h;
5461   bool _failures;
5462 
5463 public:
5464   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5465     _caller(caller), _g1h(g1h), _failures(false) { }


5477 
5478 void G1CollectedHeap::check_bitmaps(const char* caller) {
5479   if (!G1VerifyBitmaps) return;
5480 
5481   G1VerifyBitmapClosure cl(caller, this);
5482   heap_region_iterate(&cl);
5483   guarantee(!cl.failures(), "bitmap verification");
5484 }
5485 
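     // Verifies that the in-cset fast test table is consistent with each region's
     // collection set membership and region type.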
5486 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5487  private:
5488   bool _failures;
5489  public:
5490   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5491 
5492   virtual bool doHeapRegion(HeapRegion* hr) {
5493     uint i = hr->hrm_index();
5494     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5495     if (hr->is_humongous()) {
5496       if (hr->in_collection_set()) {
5497         gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
5498         _failures = true;
5499         return true;
5500       }
5501       if (cset_state.is_in_cset()) {
5502         gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
5503         _failures = true;
5504         return true;
5505       }
5506       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5507         gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
5508         _failures = true;
5509         return true;
5510       }
5511     } else {
5512       if (cset_state.is_humongous()) {
5513         gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
5514         _failures = true;
5515         return true;
5516       }
5517       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5518         gclog_or_tty->print_cr("\n## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5519                                hr->in_collection_set(), cset_state.value(), i);
5520         _failures = true;
5521         return true;
5522       }
5523       if (cset_state.is_in_cset()) {
5524         if (hr->is_young() != (cset_state.is_young())) {
5525           gclog_or_tty->print_cr("\n## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5526                                  hr->is_young(), cset_state.value(), i);
5527           _failures = true;
5528           return true;
5529         }
5530         if (hr->is_old() != (cset_state.is_old())) {
5531           gclog_or_tty->print_cr("\n## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5532                                  hr->is_old(), cset_state.value(), i);
5533           _failures = true;
5534           return true;
5535         }
5536       }
5537     }
5538     return false;
5539   }
5540 
5541   bool failures() const { return _failures; }
5542 };
5543 
5544 bool G1CollectedHeap::check_cset_fast_test() {
5545   G1CheckCSetFastTableClosure cl;
5546   _hrm.iterate(&cl);
5547   return !cl.failures();
5548 }
5549 #endif // PRODUCT
5550 
5551 void G1CollectedHeap::cleanUpCardTable() {


5729     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5730     // until the end of a concurrent mark.
5731     //
5732     // It is not required to check whether the object has been found dead by marking
5733     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5734     // all objects allocated during that time are considered live.
5735     // SATB marking is even more conservative than the remembered set.
5736     // So if at this point in the collection there is no remembered set entry,
5737     // nobody has a reference to it.
5738     // At the start of collection we flush all refinement logs, and remembered sets
5739     // are completely up-to-date with respect to references to the humongous object.
5740     //
5741     // Other implementation considerations:
5742     // - never consider object arrays at this time because they would require
5743     // considerable effort to clean up the remembered sets. This is
5744     // required because stale remembered sets might reference locations that
5745     // are currently allocated into.
5746     uint region_idx = r->hrm_index();
5747     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5748         !r->rem_set()->is_empty()) {
5749 
5750       if (G1TraceEagerReclaimHumongousObjects) {
5751         gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5752                                region_idx,
5753                                (size_t)obj->size() * HeapWordSize,
5754                                p2i(r->bottom()),
5755                                r->rem_set()->occupied(),
5756                                r->rem_set()->strong_code_roots_list_length(),
5757                                next_bitmap->isMarked(r->bottom()),
5758                                g1h->is_humongous_reclaim_candidate(region_idx),
5759                                obj->is_typeArray()
5760                               );
5761       }
5762 
5763       return false;
5764     }
5765 
5766     guarantee(obj->is_typeArray(),
5767               "Only eagerly reclaiming type arrays is supported, but the object "
5768               PTR_FORMAT " is not.", p2i(r->bottom()));
5769 
5770     if (G1TraceEagerReclaimHumongousObjects) {
5771       gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5772                              region_idx,
5773                              (size_t)obj->size() * HeapWordSize,
5774                              p2i(r->bottom()),
5775                              r->rem_set()->occupied(),
5776                              r->rem_set()->strong_code_roots_list_length(),
5777                              next_bitmap->isMarked(r->bottom()),
5778                              g1h->is_humongous_reclaim_candidate(region_idx),
5779                              obj->is_typeArray()
5780                             );
5781     }
5782     // Need to clear mark bit of the humongous object if already set.
5783     if (next_bitmap->isMarked(r->bottom())) {
5784       next_bitmap->clear(r->bottom());
5785     }
5786     do {
5787       HeapRegion* next = g1h->next_region_in_humongous(r);
5788       _freed_bytes += r->used();
5789       r->set_containing_set(NULL);
5790       _humongous_regions_removed.increment(1u, r->capacity());
5791       g1h->free_humongous_region(r, _free_region_list, false);
5792       r = next;
5793     } while (r != NULL);
5794 
5795     return false;
5796   }
5797 
5798   HeapRegionSetCount& humongous_free_count() {
5799     return _humongous_regions_removed;
5800   }
5801 
5802   size_t bytes_freed() const {
5803     return _freed_bytes;
5804   }
5805 
5806   size_t humongous_reclaimed() const {
5807     return _humongous_regions_removed.length();
5808   }
5809 };
5810 
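     // Reclaim humongous regions that are reclaim candidates and whose remembered
     // sets are empty, freeing them into a local cleanup list.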
5811 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5812   assert_at_safepoint(true);
5813 
5814   if (!G1EagerReclaimHumongousObjects ||
5815       (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
5816     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5817     return;
5818   }
5819 
5820   double start_time = os::elapsedTime();
5821 
5822   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5823 
5824   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5825   heap_region_iterate(&cl);
5826 
5827   HeapRegionSetCount empty_set;
5828   remove_from_old_sets(empty_set, cl.humongous_free_count());
5829 
5830   G1HRPrinter* hrp = hr_printer();
5831   if (hrp->is_active()) {
5832     FreeRegionListIterator iter(&local_cleanup_list);
5833     while (iter.more_available()) {
5834       HeapRegion* hr = iter.get_next();
5835       hrp->cleanup(hr);


5848 // the current incremental collection set in preparation for a
5849 // full collection. After the full GC we will start to build up
5850 // the incremental collection set again.
5851 // This is only called when we're doing a full collection
5852 // and is immediately followed by the tearing down of the young list.
5853 
5854 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5855   HeapRegion* cur = cs_head;
5856 
5857   while (cur != NULL) {
5858     HeapRegion* next = cur->next_in_collection_set();
5859     assert(cur->in_collection_set(), "bad CS");
5860     cur->set_next_in_collection_set(NULL);
5861     clear_in_cset(cur);
5862     cur->set_young_index_in_cset(-1);
5863     cur = next;
5864   }
5865 }
5866 
5867 void G1CollectedHeap::set_free_regions_coming() {
5868   if (G1ConcRegionFreeingVerbose) {
5869     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5870                            "setting free regions coming");
5871   }
5872 
5873   assert(!free_regions_coming(), "pre-condition");
5874   _free_regions_coming = true;
5875 }
5876 
5877 void G1CollectedHeap::reset_free_regions_coming() {
5878   assert(free_regions_coming(), "pre-condition");
5879 
5880   {
5881     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5882     _free_regions_coming = false;
5883     SecondaryFreeList_lock->notify_all();
5884   }
5885 
5886   if (G1ConcRegionFreeingVerbose) {
5887     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
5888                            "reset free regions coming");
5889   }
5890 }
5891 
5892 void G1CollectedHeap::wait_while_free_regions_coming() {
5893   // Most of the time we won't have to wait, so let's do a quick test
5894   // first before we take the lock.
5895   if (!free_regions_coming()) {
5896     return;
5897   }
5898 
5899   if (G1ConcRegionFreeingVerbose) {
5900     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5901                            "waiting for free regions");
5902   }
5903 
5904   {
5905     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5906     while (free_regions_coming()) {
5907       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5908     }
5909   }
5910 
5911   if (G1ConcRegionFreeingVerbose) {
5912     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
5913                            "done waiting for free regions");
5914   }
5915 }
5916 
5917 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5918   return _allocator->is_retained_old_region(hr);
5919 }
5920 
5921 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5922   _young_list->push_region(hr);
5923 }
5924 
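     // Closure used by check_young_list_empty() to verify that no region in the heap
     // is still tagged as young.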
5925 class NoYoungRegionsClosure: public HeapRegionClosure {
5926 private:
5927   bool _success;
5928 public:
5929   NoYoungRegionsClosure() : _success(true) { }
5930   bool doHeapRegion(HeapRegion* r) {
5931     if (r->is_young()) {
5932       gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5933                              p2i(r->bottom()), p2i(r->end()));
5934       _success = false;
5935     }
5936     return false;
5937   }
5938   bool success() { return _success; }
5939 };
5940 
5941 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5942   bool ret = _young_list->check_list_empty(check_sample);
5943 
5944   if (check_heap) {
5945     NoYoungRegionsClosure closure;
5946     heap_region_iterate(&closure);
5947     ret = ret && closure.success();
5948   }
5949 
5950   return ret;
5951 }
5952 


6163 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6164                                              size_t allocated_bytes,
6165                                              InCSetState dest) {
6166   bool during_im = collector_state()->during_initial_mark_pause();
6167   alloc_region->note_end_of_copying(during_im);
6168   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6169   if (dest.is_young()) {
6170     young_list()->add_survivor_region(alloc_region);
6171   } else {
6172     _old_set.add(alloc_region);
6173   }
6174   _hr_printer.retire(alloc_region);
6175 }
6176 
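     // Allocate the free region with the highest index, expanding the heap at the upper
     // end if necessary. Returns NULL if no such region is available.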
6177 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6178   bool expanded = false;
6179   uint index = _hrm.find_highest_free(&expanded);
6180 
6181   if (index != G1_NO_HRM_INDEX) {
6182     if (expanded) {
6183       ergo_verbose1(ErgoHeapSizing,
6184                     "attempt heap expansion",
6185                     ergo_format_reason("requested address range outside heap bounds")
6186                     ergo_format_byte("region size"),
6187                     HeapRegion::GrainWords * HeapWordSize);
6188     }
6189     _hrm.allocate_free_regions_starting_at(index, 1);
6190     return region_at(index);
6191   }
6192   return NULL;
6193 }
6194 
6195 // Heap region set verification
6196 
6197 class VerifyRegionListsClosure : public HeapRegionClosure {
6198 private:
6199   HeapRegionSet*   _old_set;
6200   HeapRegionSet*   _humongous_set;
6201   HeapRegionManager*   _hrm;
6202 
6203 public:
6204   HeapRegionSetCount _old_count;
6205   HeapRegionSetCount _humongous_count;
6206   HeapRegionSetCount _free_count;




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentG1Refine.hpp"
  33 #include "gc/g1/concurrentG1RefineThread.hpp"
  34 #include "gc/g1/concurrentMarkThread.inline.hpp"
  35 #include "gc/g1/g1Allocator.inline.hpp"
  36 #include "gc/g1/g1CollectedHeap.inline.hpp"
  37 #include "gc/g1/g1CollectorPolicy.hpp"
  38 #include "gc/g1/g1CollectorState.hpp"

  39 #include "gc/g1/g1EvacStats.inline.hpp"
  40 #include "gc/g1/g1GCPhaseTimes.hpp"

  41 #include "gc/g1/g1MarkSweep.hpp"
  42 #include "gc/g1/g1OopClosures.inline.hpp"
  43 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  44 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  45 #include "gc/g1/g1RemSet.inline.hpp"
  46 #include "gc/g1/g1RootClosures.hpp"
  47 #include "gc/g1/g1RootProcessor.hpp"
  48 #include "gc/g1/g1StringDedup.hpp"
  49 #include "gc/g1/g1YCTypes.hpp"
  50 #include "gc/g1/heapRegion.inline.hpp"
  51 #include "gc/g1/heapRegionRemSet.hpp"
  52 #include "gc/g1/heapRegionSet.inline.hpp"
  53 #include "gc/g1/suspendibleThreadSet.hpp"
  54 #include "gc/g1/vm_operations_g1.hpp"
  55 #include "gc/shared/gcHeapSummary.hpp"
  56 #include "gc/shared/gcId.hpp"
  57 #include "gc/shared/gcLocker.inline.hpp"
  58 #include "gc/shared/gcTimer.hpp"
  59 #include "gc/shared/gcTrace.hpp"
  60 #include "gc/shared/gcTraceTime.inline.hpp"
  61 #include "gc/shared/generationSpec.hpp"
  62 #include "gc/shared/isGCActiveMark.hpp"
  63 #include "gc/shared/referenceProcessor.hpp"
  64 #include "gc/shared/taskqueue.inline.hpp"
  65 #include "logging/log.hpp"
  66 #include "memory/allocation.hpp"
  67 #include "memory/iterator.hpp"
  68 #include "oops/oop.inline.hpp"
  69 #include "runtime/atomic.inline.hpp"
  70 #include "runtime/init.hpp"
  71 #include "runtime/orderAccess.inline.hpp"
  72 #include "runtime/vmThread.hpp"
  73 #include "utilities/globalDefinitions.hpp"
  74 #include "utilities/stack.inline.hpp"
  75 
  76 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  77 
  78 // INVARIANTS/NOTES
  79 //
  80 // All allocation activity covered by the G1CollectedHeap interface is
  81 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  82 // and allocate_new_tlab, which are the "entry" points to the
  83 // allocation code from the rest of the JVM.  (Note that this does not
  84 // apply to TLAB allocation, which is not part of this interface: it
  85 // is done by clients of this interface.)


 206   } while (hr != head);
 207   assert(hr != NULL, "invariant");
 208   hr->set_next_dirty_cards_region(NULL);
 209   return hr;
 210 }
 211 
 212 // Returns true if the reference points to an object that
 213 // can move in an incremental collection.
 214 bool G1CollectedHeap::is_scavengable(const void* p) {
 215   HeapRegion* hr = heap_region_containing(p);
 216   return !hr->is_pinned();
 217 }
 218 
 219 // Private methods.
 220 
 221 HeapRegion*
 222 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 223   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 224   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 225     if (!_secondary_free_list.is_empty()) {
 226       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 227                                       "secondary_free_list has %u entries",
 228                                       _secondary_free_list.length());

 229       // It looks as if there are free regions available on the
 230       // secondary_free_list. Let's move them to the free_list and try
 231       // again to allocate from it.
 232       append_secondary_free_list();
 233 
 234       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 235              "empty we should have moved at least one entry to the free_list");
 236       HeapRegion* res = _hrm.allocate_free_region(is_old);
 237       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 238                                       "allocated " HR_FORMAT " from secondary_free_list",
 239                                       HR_FORMAT_PARAMS(res));

 240       return res;
 241     }
 242 
 243     // Wait here until we get notified either when (a) there are no
 244     // more free regions coming or (b) some regions have been moved on
 245     // the secondary_free_list.
 246     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 247   }
 248 
 249   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 250                                   "could not allocate from secondary_free_list");

 251   return NULL;
 252 }
 253 
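     // Allocate a single free region, falling back to the secondary free list and,
     // if do_expand is true and allocation still fails, attempting to expand the heap.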
 254 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 255   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 256          "the only time we use this to allocate a humongous region is "
 257          "when we are allocating a single humongous region");
 258 
 259   HeapRegion* res;
 260   if (G1StressConcRegionFreeing) {
 261     if (!_secondary_free_list.is_empty()) {
 262       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 263                                       "forced to look at the secondary_free_list");

 264       res = new_region_try_secondary_free_list(is_old);
 265       if (res != NULL) {
 266         return res;
 267       }
 268     }
 269   }
 270 
 271   res = _hrm.allocate_free_region(is_old);
 272 
 273   if (res == NULL) {
 274     log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "

 275                                     "res == NULL, trying the secondary_free_list");

 276     res = new_region_try_secondary_free_list(is_old);
 277   }
 278   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 279     // Currently, only attempts to allocate GC alloc regions set
 280     // do_expand to true. So, we should only reach here during a
 281     // safepoint. If this assumption changes we might have to
 282     // reconsider the use of _expand_heap_after_alloc_failure.
 283     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 284 
 285     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",



 286                               word_size * HeapWordSize);
 287 
 288     if (expand(word_size * HeapWordSize)) {
 289       // Given that expand() succeeded in expanding the heap, and we
 290       // always expand the heap by an amount aligned to the heap
 291       // region size, the free list should in theory not be empty.
 292       // In either case allocate_free_region() will check for NULL.
 293       res = _hrm.allocate_free_region(is_old);
 294     } else {
 295       _expand_heap_after_alloc_failure = false;
 296     }
 297   }
 298   return res;
 299 }
 300 
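     // Set up the contiguous range of regions [first, first + num_regions) that has just
     // been allocated for a humongous object of word_size words.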
 301 HeapWord*
 302 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 303                                                            uint num_regions,
 304                                                            size_t word_size,
 305                                                            AllocationContext_t context) {
 306   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 307   assert(is_humongous(word_size), "word_size should be humongous");


 455     // potentially waits for regions from the secondary free list.
 456     wait_while_free_regions_coming();
 457     append_secondary_free_list_if_not_empty_with_lock();
 458 
 459     // Policy: Try only empty regions (i.e. already committed) first. Maybe we
 460     // are lucky enough to find some.
 461     first = _hrm.find_contiguous_only_empty(obj_regions);
 462     if (first != G1_NO_HRM_INDEX) {
 463       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 464     }
 465   }
 466 
 467   if (first == G1_NO_HRM_INDEX) {
 468     // Policy: We could not find enough regions for the humongous object in the
 469     // free list. Look through the heap to find a mix of free and uncommitted regions.
 470     // If we find such a range, try expansion.
 471     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 472     if (first != G1_NO_HRM_INDEX) {
 473       // We found something. Make sure these regions are committed, i.e. expand
 474       // the heap. Alternatively we could do a defragmentation GC.
 475       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",



 476                                     word_size * HeapWordSize);
 477 
 478 
 479       _hrm.expand_at(first, obj_regions);
 480       g1_policy()->record_new_heap_size(num_regions());
 481 
 482 #ifdef ASSERT
 483       for (uint i = first; i < first + obj_regions; ++i) {
 484         HeapRegion* hr = region_at(i);
 485         assert(hr->is_free(), "sanity");
 486         assert(hr->is_empty(), "sanity");
 487         assert(is_on_master_free_list(hr), "sanity");
 488       }
 489 #endif
 490       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 491     } else {
 492       // Policy: Potentially trigger a defragmentation GC.
 493     }
 494   }
 495 
 496   HeapWord* result = NULL;
 497   if (first != G1_NO_HRM_INDEX) {
 498     result = humongous_obj_allocate_initialize_regions(first, obj_regions,


 776     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 777     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 778       start_address = start_region->end();
 779       if (start_address > last_address) {
 780         increase_used(word_size * HeapWordSize);
 781         start_region->set_top(last_address + 1);
 782         continue;
 783       }
 784       start_region->set_top(start_address);
 785       curr_range = MemRegion(start_address, last_address + 1);
 786       start_region = _hrm.addr_to_region(start_address);
 787     }
 788 
 789     // Perform the actual region allocation, exiting if it fails.
 790     // Then note how much new space we have allocated.
 791     if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
 792       return false;
 793     }
 794     increase_used(word_size * HeapWordSize);
 795     if (commits != 0) {
 796       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",



 797                                 HeapRegion::GrainWords * HeapWordSize * commits);
 798 
 799     }
 800 
 801     // Mark each G1 region touched by the range as archive, add it to the old set,
 802     // and set the allocation context and top.
 803     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 804     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 805     prev_last_region = last_region;
 806 
 807     while (curr_region != NULL) {
 808       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 809              "Region already in use (index %u)", curr_region->hrm_index());
 810       _hr_printer.alloc(curr_region, G1HRPrinter::Archive);
 811       curr_region->set_allocation_context(AllocationContext::system());
 812       curr_region->set_archive();
 813       _old_set.add(curr_region);
 814       if (curr_region != last_region) {
 815         curr_region->set_top(curr_region->end());
 816         curr_region = _hrm.next_region_in_heap(curr_region);
 817       } else {
 818         curr_region->set_top(last_address + 1);


 959       guarantee(curr_region->is_archive(),
 960                 "Expected archive region at index %u", curr_region->hrm_index());
 961       uint curr_index = curr_region->hrm_index();
 962       _old_set.remove(curr_region);
 963       curr_region->set_free();
 964       curr_region->set_top(curr_region->bottom());
 965       if (curr_region != last_region) {
 966         curr_region = _hrm.next_region_in_heap(curr_region);
 967       } else {
 968         curr_region = NULL;
 969       }
 970       _hrm.shrink_at(curr_index, 1);
 971       uncommitted_regions++;
 972     }
 973 
 974     // Notify mark-sweep that this is no longer an archive range.
 975     G1MarkSweep::set_range_archive(ranges[i], false);
 976   }
 977 
 978   if (uncommitted_regions != 0) {
 979     log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",



 980                               HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
 981   }
 982   decrease_used(size_used);
 983 }
 984 
 985 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 986                                                         uint* gc_count_before_ret,
 987                                                         uint* gclocker_retry_count_ret) {
 988   // The structure of this method has a lot of similarities to
 989   // attempt_allocation_slow(). The reason these two were not merged
 990   // into a single one is that such a method would require several "if
 991   // allocation is not humongous do this, otherwise do that"
 992   // conditional paths which would obscure its flow. In fact, an early
 993   // version of this code did use a unified method which was harder to
 994   // follow and, as a result, it had subtle bugs that were hard to
 995   // track down. So keeping these two methods separate allows each to
 996   // be more readable. It will be good to keep these two in sync as
 997   // much as possible.
 998 
 999   assert_heap_not_locked_and_not_at_safepoint();


1199       // We only generate output for non-empty regions.
1200     } else if (hr->is_starts_humongous()) {
1201       _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1202     } else if (hr->is_continues_humongous()) {
1203       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1204     } else if (hr->is_archive()) {
1205       _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1206     } else if (hr->is_old()) {
1207       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1208     } else {
1209       ShouldNotReachHere();
1210     }
1211     return false;
1212   }
1213 
1214   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1215     : _hr_printer(hr_printer) { }
1216 };
1217 
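     // Report the post-compaction state of every region to the region printer,
     // if it is active.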
1218 void G1CollectedHeap::print_hrm_post_compaction() {
1219   if (_hr_printer.is_active()) {
1220     PostCompactionPrinterClosure cl(hr_printer());
1221     heap_region_iterate(&cl);
1222   }
1223 
1224 }
1225 
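     // Perform a stop-the-world full collection. Returns false if the collection
     // could not be started because the GC locker is active.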
1226 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1227                                          bool clear_all_soft_refs) {
1228   assert_at_safepoint(true /* should_be_vm_thread */);
1229 
1230   if (GC_locker::check_active_before_gc()) {
1231     return false;
1232   }
1233 
1234   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1235   gc_timer->register_gc_start();
1236 
1237   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1238   GCIdMark gc_id_mark;
1239   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1240 
1241   SvcGCMarker sgcm(SvcGCMarker::FULL);
1242   ResourceMark rm;
1243 

1244   print_heap_before_gc();
1245   trace_heap_before_gc(gc_tracer);
1246 
1247   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1248 
1249   verify_region_sets_optional();
1250 
1251   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1252                            collector_policy()->should_clear_all_soft_refs();
1253 
1254   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1255 
1256   {
1257     IsGCActiveMark x;
1258 
1259     // Timing
1260     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1261     GCTraceCPUTime tcpu;
1262 
1263     {
1264       GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1265       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1266       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1267 
1268       g1_policy()->record_full_collection_start();
1269 
1270       // Note: When we have a more flexible GC logging framework that
1271       // allows us to add optional attributes to a GC log record we
1272       // could consider timing and reporting how long we wait in the
1273       // following two methods.
1274       wait_while_free_regions_coming();
1275       // If we start the compaction before the CM threads finish
1276       // scanning the root regions we might trip them over as we'll
1277       // be moving objects / updating references. So let's wait until
1278       // they are done. By telling them to abort, they should complete
1279       // early.
1280       _cm->root_regions()->abort();
1281       _cm->root_regions()->wait_until_scan_finished();
1282       append_secondary_free_list_if_not_empty_with_lock();
1283 
1284       gc_prologue(true);


1295 #if defined(COMPILER2) || INCLUDE_JVMCI
1296       DerivedPointerTable::clear();
1297 #endif
1298 
1299       // Disable discovery and empty the discovered lists
1300       // for the CM ref processor.
1301       ref_processor_cm()->disable_discovery();
1302       ref_processor_cm()->abandon_partial_discovery();
1303       ref_processor_cm()->verify_no_references_recorded();
1304 
1305       // Abandon current iterations of concurrent marking and concurrent
1306       // refinement, if any are in progress. We have to do this before
1307       // wait_until_scan_finished() below.
1308       concurrent_mark()->abort();
1309 
1310       // Make sure we'll choose a new allocation region afterwards.
1311       _allocator->release_mutator_alloc_region();
1312       _allocator->abandon_gc_alloc_regions();
1313       g1_rem_set()->cleanupHRRS();
1314 





1315       // We may have added regions to the current incremental collection
1316       // set between the last GC or pause and now. We need to clear the
1317       // incremental collection set and then start rebuilding it afresh
1318       // after this full GC.
1319       abandon_collection_set(g1_policy()->inc_cset_head());
1320       g1_policy()->clear_incremental_cset();
1321       g1_policy()->stop_incremental_cset_building();
1322 
1323       tear_down_region_sets(false /* free_list_only */);
1324       collector_state()->set_gcs_are_young(true);
1325 
1326       // See the comments in g1CollectedHeap.hpp and
1327       // G1CollectedHeap::ref_processing_init() about
1328       // how reference processing currently works in G1.
1329 
1330       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1331       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1332 
1333       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1334       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);


1361       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1362       ClassLoaderDataGraph::purge();
1363       MetaspaceAux::verify_metrics();
1364 
1365       // Note: since we've just done a full GC, concurrent
1366       // marking is no longer active. Therefore we need not
1367       // re-enable reference discovery for the CM ref processor.
1368       // That will be done at the start of the next marking cycle.
1369       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1370       ref_processor_cm()->verify_no_references_recorded();
1371 
1372       reset_gc_time_stamp();
1373       // Since everything potentially moved, we will clear all remembered
1374       // sets, and clear all cards.  Later we will rebuild remembered
1375       // sets. We will also reset the GC time stamps of the regions.
1376       clear_rsets_post_compaction();
1377       check_gc_time_stamps();
1378 
1379       resize_if_necessary_after_full_collection();
1380 

1381       // We should do this after we potentially resize the heap so
1382       // that all the COMMIT / UNCOMMIT events are generated before
1383       // the compaction events.

1384       print_hrm_post_compaction();


1385 
1386       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1387       if (hot_card_cache->use_cache()) {
1388         hot_card_cache->reset_card_counts();
1389         hot_card_cache->reset_hot_cache();
1390       }
1391 
1392       // Rebuild remembered sets of all regions.
1393       uint n_workers =
1394         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1395                                                 workers()->active_workers(),
1396                                                 Threads::number_of_non_daemon_threads());
1397       workers()->set_active_workers(n_workers);
1398 
1399       ParRebuildRSTask rebuild_rs_task(this);
1400       workers()->run_task(&rebuild_rs_task);
1401 
1402       // Rebuild the strong code root lists for each region
1403       rebuild_strong_code_roots();
1404 


1433       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1434       // objects marked during a full GC against the previous bitmap.
1435       // But we need to clear it before calling check_bitmaps below since
1436       // the full GC has compacted objects and updated TAMS but not updated
1437       // the prev bitmap.
1438       if (G1VerifyBitmaps) {
1439         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1440       }
1441       check_bitmaps("Full GC End");
1442 
1443       // Start a new incremental collection set for the next pause
1444       assert(g1_policy()->collection_set() == NULL, "must be");
1445       g1_policy()->start_incremental_cset_building();
1446 
1447       clear_cset_fast_test();
1448 
1449       _allocator->init_mutator_alloc_region();
1450 
1451       g1_policy()->record_full_collection_end();
1452 




1453       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1454       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1455       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1456       // before any GC notifications are raised.
1457       g1mm()->update_sizes();
1458 
1459       gc_epilogue(true);
1460     }
1461 
1462     g1_policy()->print_detailed_heap_transition();


1463 
1464     print_heap_after_gc();
1465     trace_heap_after_gc(gc_tracer);
1466 
1467     post_full_gc_dump(gc_timer);
1468 
1469     gc_timer->register_gc_end();
1470     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1471   }
1472 
1473   return true;
1474 }
1475 
1476 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1477   // Currently, there is no facility in the do_full_collection(bool) API to notify
1478   // the caller that the collection did not succeed (e.g., because it was locked
1479   // out by the GC locker). So, right now, we'll ignore the return value.
1480   bool dummy = do_full_collection(true,                /* explicit_gc */
1481                                   clear_all_soft_refs);
1482 }


1520 
1521   // This assert only makes sense here, before we adjust them
1522   // with respect to the min and max heap size.
1523   assert(minimum_desired_capacity <= maximum_desired_capacity,
1524          "minimum_desired_capacity = " SIZE_FORMAT ", "
1525          "maximum_desired_capacity = " SIZE_FORMAT,
1526          minimum_desired_capacity, maximum_desired_capacity);
1527 
1528   // Should not be greater than the heap max size. No need to adjust
1529   // it with respect to the heap min size as it's a lower bound (i.e.,
1530   // we'll try to make the capacity larger than it, not smaller).
1531   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1532   // Should not be less than the heap min size. No need to adjust it
1533   // with respect to the heap max size as it's an upper bound (i.e.,
1534   // we'll try to make the capacity smaller than it, not greater).
1535   maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1536 
1537   if (capacity_after_gc < minimum_desired_capacity) {
1538     // Don't expand unless it's significant
1539     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1540 
1541     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1542                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1543                               capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1544 




1545     expand(expand_bytes);
1546 
1547     // No expansion, now see if we want to shrink
1548   } else if (capacity_after_gc > maximum_desired_capacity) {
1549     // Capacity too large, compute shrinking size
1550     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1551 
1552     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1553                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1554                               capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1555 




1556     shrink(shrink_bytes);
1557   }
1558 }
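// Illustrative sketch (standalone, not part of this file): the capacity
// bounds used in the resize decision above are conventionally derived from
// the post-GC occupancy and the Min/MaxHeapFreeRatio percentages. The helper
// below is a hedged illustration of that arithmetic only; the name and the
// example numbers are assumptions.

#include <cstddef>

// A capacity where free/capacity equals the ratio satisfies
//   capacity = used / (1 - ratio)
static size_t desired_capacity(size_t used_after_gc, unsigned free_ratio_percent) {
  double free_ratio = free_ratio_percent / 100.0;
  return (size_t)((double)used_after_gc / (1.0 - free_ratio));
}

// Example: with 600 MB used after the Full GC, a MinHeapFreeRatio of 40 yields
// a ~1000 MB lower bound, and a MaxHeapFreeRatio of 70 a ~2000 MB upper bound.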
1559 
1560 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1561                                                             AllocationContext_t context,
1562                                                             bool do_gc,
1563                                                             bool clear_all_soft_refs,
1564                                                             bool expect_null_mutator_alloc_region,
1565                                                             bool* gc_succeeded) {
1566   *gc_succeeded = true;
1567   // Let's attempt the allocation first.
1568   HeapWord* result =
1569     attempt_allocation_at_safepoint(word_size,
1570                                     context,
1571                                     expect_null_mutator_alloc_region);
1572   if (result != NULL) {
1573     assert(*gc_succeeded, "sanity");
1574     return result;
1575   }


1641 
1642   // What else?  We might try synchronous finalization later.  If the total
1643   // space available is large enough for the allocation, then a more
1644   // complete compaction phase than we've tried so far might be
1645   // appropriate.
1646   assert(*succeeded, "sanity");
1647   return NULL;
1648 }
1649 
1650 // Attempts to expand the heap sufficiently
1651 // to support an allocation of the given "word_size".  If
1652 // successful, performs the allocation and returns the address of the
1653 // allocated block, or else "NULL".
1654 
1655 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1656   assert_at_safepoint(true /* should_be_vm_thread */);
1657 
1658   verify_region_sets_optional();
1659 
1660   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1661   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",



1662                             word_size * HeapWordSize);
1663 
1664 
1665   if (expand(expand_bytes)) {
1666     _hrm.verify_optional();
1667     verify_region_sets_optional();
1668     return attempt_allocation_at_safepoint(word_size,
1669                                            context,
1670                                            false /* expect_null_mutator_alloc_region */);
1671   }
1672   return NULL;
1673 }
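// In other words, the expansion request above is the larger of the failed
// allocation's size in bytes and MinHeapDeltaBytes; expand() then rounds the
// request up to whole pages and whole regions before committing memory.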
1674 
1675 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1676   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1677   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1678                                        HeapRegion::GrainBytes);
1679 
1680   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B aligned expansion amount: " SIZE_FORMAT "B",


1681                             expand_bytes, aligned_expand_bytes);
1682 
1683   if (is_maximal_no_gc()) {
1684     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");


1685     return false;
1686   }
1687 
1688   double expand_heap_start_time_sec = os::elapsedTime();
1689   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1690   assert(regions_to_expand > 0, "Must expand by at least one region");
1691 
1692   uint expanded_by = _hrm.expand_by(regions_to_expand);
1693   if (expand_time_ms != NULL) {
1694     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1695   }
1696 
1697   if (expanded_by > 0) {
1698     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1699     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1700     g1_policy()->record_new_heap_size(num_regions());
1701   } else {
1702     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1703 

1704     // The expansion of the virtual storage space was unsuccessful.
1705     // Let's see if it was because we ran out of swap.
1706     if (G1ExitOnExpansionFailure &&
1707         _hrm.available() >= regions_to_expand) {
1708       // We had head room...
1709       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1710     }
1711   }
1712   return regions_to_expand > 0;
1713 }
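// Illustrative sketch (standalone): the two-step rounding performed above,
// written out in isolation. round_up_to is a local stand-in for
// ReservedSpace::page_align_size_up followed by align_size_up to the region
// size; the page and region sizes in the example are assumptions.

#include <cstddef>

static size_t round_up_to(size_t value, size_t alignment) {
  return ((value + alignment - 1) / alignment) * alignment;
}

// With 4 KiB pages and 1 MiB regions, a 100 KiB expansion request becomes
//   round_up_to(round_up_to(100 * 1024, 4 * 1024), 1024 * 1024) == 1 MiB,
// i.e. exactly one region to expand by.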
1714 
1715 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1716   size_t aligned_shrink_bytes =
1717     ReservedSpace::page_align_size_down(shrink_bytes);
1718   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1719                                          HeapRegion::GrainBytes);
1720   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1721 
1722   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1723   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1724 
1725 
1726   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",



1727                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1728   if (num_regions_removed > 0) {
1729     g1_policy()->record_new_heap_size(num_regions());
1730   } else {
1731     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");


1732   }
1733 }
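// Illustrative numbers only: with 1 MiB regions, a 2.5 MiB shrink request is
// page-aligned down and then region-aligned down to 2 MiB, and two regions are
// attempted; _hrm.shrink_by() may still remove fewer if regions are not free.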
1734 
1735 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1736   verify_region_sets_optional();
1737 
1738   // We should only reach here at the end of a Full GC, which means we
1739   // should not be holding on to any GC alloc regions. The method
1740   // below will make sure of that and do any remaining clean up.
1741   _allocator->abandon_gc_alloc_regions();
1742 
1743   // Instead of tearing down / rebuilding the free lists here, we
1744   // could instead use the remove_all_pending() method on free_list to
1745   // remove only the ones that we need to remove.
1746   tear_down_region_sets(true /* free_list_only */);
1747   shrink_helper(shrink_bytes);
1748   rebuild_region_sets(true /* free_list_only */);
1749 
1750   _hrm.verify_optional();
1751   verify_region_sets_optional();


1823   // Initialize the G1EvacuationFailureALot counters and flags.
1824   NOT_PRODUCT(reset_evacuation_should_fail();)
1825 
1826   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1827 }
1828 
1829 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1830                                                                  size_t size,
1831                                                                  size_t translation_factor) {
1832   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1833   // Allocate a new reserved space, preferring to use large pages.
1834   ReservedSpace rs(size, preferred_page_size);
1835   G1RegionToSpaceMapper* result  =
1836     G1RegionToSpaceMapper::create_mapper(rs,
1837                                          size,
1838                                          rs.alignment(),
1839                                          HeapRegion::GrainBytes,
1840                                          translation_factor,
1841                                          mtGC);
1842   if (TracePageSizes) {
1843     tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
1844                   description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
1845   }
1846   return result;
1847 }
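// Illustrative note: the translation_factor expresses how many heap bytes each
// byte of the auxiliary structure covers, so such a structure is sized at
// roughly covered_heap_size / translation_factor (e.g. a table with one byte
// per 512 heap bytes would use a factor of 512). This is a general reading of
// the parameter, not a statement about any particular caller.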
1848 
1849 jint G1CollectedHeap::initialize() {
1850   CollectedHeap::pre_initialize();
1851   os::enable_vtime();
1852 


1853   // Necessary to satisfy locking discipline assertions.
1854 
1855   MutexLocker x(Heap_lock);
1856 




1857   // While there are no constraints in the GC code that HeapWordSize
1858   // be any particular value, there are multiple other areas in the
1859   // system which believe this to be true (e.g. oop->object_size in some
1860   // cases incorrectly returns the size in wordSize units rather than
1861   // HeapWordSize).
1862   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1863 
1864   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1865   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1866   size_t heap_alignment = collector_policy()->heap_alignment();
1867 
1868   // Ensure that the sizes are properly aligned.
1869   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1870   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1871   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1872 
1873   _refine_cte_cl = new RefineCardTableEntryClosure();
1874 
1875   jint ecode = JNI_OK;
1876   _cg1r = ConcurrentG1Refine::create(this, _refine_cte_cl, &ecode);


2029   G1AllocRegion::setup(this, dummy_region);
2030 
2031   _allocator->init_mutator_alloc_region();
2032 
2033   // Create the monitoring and management support so that
2034   // values in the heap have been properly initialized.
2035   _g1mm = new G1MonitoringSupport(this);
2036 
2037   G1StringDedup::initialize();
2038 
2039   _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
2040   for (uint i = 0; i < ParallelGCThreads; i++) {
2041     new (&_preserved_objs[i]) OopAndMarkOopStack();
2042   }
2043 
2044   return JNI_OK;
2045 }
2046 
2047 void G1CollectedHeap::stop() {
2048   // Stop all concurrent threads. We do this to make sure these threads
2049   // do not continue to execute and access resources (e.g. logging)
2050   // that are destroyed during shutdown.
2051   _cg1r->stop();
2052   _cmThread->stop();
2053   if (G1StringDedup::is_enabled()) {
2054     G1StringDedup::stop();
2055   }
2056 }
2057 
2058 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2059   return HeapRegion::max_region_size();
2060 }
2061 
2062 void G1CollectedHeap::post_initialize() {
2063   CollectedHeap::post_initialize();
2064   ref_processing_init();
2065 }
2066 
2067 void G1CollectedHeap::ref_processing_init() {
2068   // Reference processing in G1 currently works as follows:
2069   //


2146 }
2147 
2148 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2149   hr->reset_gc_time_stamp();
2150 }
2151 
2152 #ifndef PRODUCT
2153 
2154 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2155 private:
2156   unsigned _gc_time_stamp;
2157   bool _failures;
2158 
2159 public:
2160   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2161     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2162 
2163   virtual bool doHeapRegion(HeapRegion* hr) {
2164     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2165     if (_gc_time_stamp != region_gc_time_stamp) {
2166       log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),

2167                            region_gc_time_stamp, _gc_time_stamp);
2168       _failures = true;
2169     }
2170     return false;
2171   }
2172 
2173   bool failures() { return _failures; }
2174 };
2175 
2176 void G1CollectedHeap::check_gc_time_stamps() {
2177   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2178   heap_region_iterate(&cl);
2179   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2180 }
2181 #endif // PRODUCT
2182 
2183 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2184   _cg1r->hot_card_cache()->drain(cl, worker_i);
2185 }
2186 


2740 private:
2741   G1CollectedHeap* _g1h;
2742   VerifyOption     _vo;
2743   bool             _failures;
2744 public:
2745   // _vo == UsePrevMarking -> use "prev" marking information,
2746   // _vo == UseNextMarking -> use "next" marking information,
2747   // _vo == UseMarkWord    -> use mark word from object header.
2748   VerifyRootsClosure(VerifyOption vo) :
2749     _g1h(G1CollectedHeap::heap()),
2750     _vo(vo),
2751     _failures(false) { }
2752 
2753   bool failures() { return _failures; }
2754 
2755   template <class T> void do_oop_nv(T* p) {
2756     T heap_oop = oopDesc::load_heap_oop(p);
2757     if (!oopDesc::is_null(heap_oop)) {
2758       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2759       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2760         LogHandle(gc, verify) log;
2761         log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2762         if (_vo == VerifyOption_G1UseMarkWord) {
2763           log.info("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
2764         }
2765         ResourceMark rm;
2766         obj->print_on(log.info_stream());
2767         _failures = true;
2768       }
2769     }
2770   }
2771 
2772   void do_oop(oop* p)       { do_oop_nv(p); }
2773   void do_oop(narrowOop* p) { do_oop_nv(p); }
2774 };
2775 
2776 class G1VerifyCodeRootOopClosure: public OopClosure {
2777   G1CollectedHeap* _g1h;
2778   OopClosure* _root_cl;
2779   nmethod* _nm;
2780   VerifyOption _vo;
2781   bool _failures;
2782 
2783   template <class T> void do_oop_work(T* p) {
2784     // First verify that this root is live
2785     _root_cl->do_oop(p);
2786 


2791 
2792     // Don't check the code roots during marking verification in a full GC
2793     if (_vo == VerifyOption_G1UseMarkWord) {
2794       return;
2795     }
2796 
2797     // Now verify that the current nmethod (which contains p) is
2798     // in the code root list of the heap region containing the
2799     // object referenced by p.
2800 
2801     T heap_oop = oopDesc::load_heap_oop(p);
2802     if (!oopDesc::is_null(heap_oop)) {
2803       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2804 
2805       // Now fetch the region containing the object
2806       HeapRegion* hr = _g1h->heap_region_containing(obj);
2807       HeapRegionRemSet* hrrs = hr->rem_set();
2808       // Verify that the strong code root list for this region
2809       // contains the nmethod
2810       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2811         log_info(gc, verify)("Code root location " PTR_FORMAT " "
2812                              "from nmethod " PTR_FORMAT " not in strong "
2813                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2814                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2815         _failures = true;
2816       }
2817     }
2818   }
2819 
2820 public:
2821   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2822     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2823 
2824   void do_oop(oop* p) { do_oop_work(p); }
2825   void do_oop(narrowOop* p) { do_oop_work(p); }
2826 
2827   void set_nmethod(nmethod* nm) { _nm = nm; }
2828   bool failures() { return _failures; }
2829 };
2830 
2831 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {


2972   }
2973 
2974   bool doHeapRegion(HeapRegion* r) {
2975     // For archive regions, verify there are no heap pointers to
2976     // non-pinned regions. For all others, verify liveness info.
2977     if (r->is_archive()) {
2978       VerifyArchiveRegionClosure verify_oop_pointers(r);
2979       r->object_iterate(&verify_oop_pointers);
2980       return true;
2981     }
2982     if (!r->is_continues_humongous()) {
2983       bool failures = false;
2984       r->verify(_vo, &failures);
2985       if (failures) {
2986         _failures = true;
2987       } else if (!r->is_starts_humongous()) {
2988         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2989         r->object_iterate(&not_dead_yet_cl);
2990         if (_vo != VerifyOption_G1UseNextMarking) {
2991           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2992             log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
2993                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());




2994             _failures = true;
2995           }
2996         } else {
2997           // When vo == UseNextMarking we cannot currently do a sanity
2998           // check on the live bytes as the calculation has not been
2999           // finalized yet.
3000         }
3001       }
3002     }
3003     return false; // stop the region iteration if we hit a failure
3004   }
3005 };
3006 
3007 // This is the task used for parallel verification of the heap regions
3008 
3009 class G1ParVerifyTask: public AbstractGangTask {
3010 private:
3011   G1CollectedHeap*  _g1h;
3012   VerifyOption      _vo;
3013   bool              _failures;


3021       AbstractGangTask("Parallel verify task"),
3022       _g1h(g1h),
3023       _vo(vo),
3024       _failures(false),
3025       _hrclaimer(g1h->workers()->active_workers()) {}
3026 
3027   bool failures() {
3028     return _failures;
3029   }
3030 
3031   void work(uint worker_id) {
3032     HandleMark hm;
3033     VerifyRegionClosure blk(true, _vo);
3034     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3035     if (blk.failures()) {
3036       _failures = true;
3037     }
3038   }
3039 };
3040 
3041 void G1CollectedHeap::verify(VerifyOption vo) {
3042   if (!SafepointSynchronize::is_at_safepoint()) {
3043     log_info(gc, verify)("Skipping verification. Not at safepoint.");
3044   }
3045 
3046   assert(Thread::current()->is_VM_thread(),
3047          "Expected to be executed serially by the VM thread at this point");
3048 
3049   log_debug(gc, verify)("Roots");
3050   VerifyRootsClosure rootsCl(vo);
3051   VerifyKlassClosure klassCl(this, &rootsCl);
3052   CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3053 
3054   // We apply the relevant closures to all the oops in the
3055   // system dictionary, class loader data graph, the string table
3056   // and the nmethods in the code cache.
3057   G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3058   G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3059 
3060   {
3061     G1RootProcessor root_processor(this, 1);
3062     root_processor.process_all_roots(&rootsCl,
3063                                      &cldCl,
3064                                      &blobsCl);
3065   }
3066 
3067   bool failures = rootsCl.failures() || codeRootsCl.failures();
3068 
3069   if (vo != VerifyOption_G1UseMarkWord) {
3070     // If we're verifying during a full GC then the region sets
3071     // will have been torn down at the start of the GC. Therefore
3072     // verifying the region sets will fail. So we only verify
3073     // the region sets when not in a full GC.
3074     log_debug(gc, verify)("HeapRegionSets");
3075     verify_region_sets();
3076   }
3077 
3078   log_debug(gc, verify)("HeapRegions");
3079   if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3080 
3081     G1ParVerifyTask task(this, vo);
3082     workers()->run_task(&task);
3083     if (task.failures()) {
3084       failures = true;
3085     }
3086 
3087   } else {
3088     VerifyRegionClosure blk(false, vo);
3089     heap_region_iterate(&blk);
3090     if (blk.failures()) {
3091       failures = true;
3092     }
3093   }
3094 
3095   if (G1StringDedup::is_enabled()) {
3096     log_debug(gc, verify)("StrDedup");
3097     G1StringDedup::verify();
3098   }
3099 
3100   if (failures) {
3101     log_info(gc, verify)("Heap after failed verification:");
3102     // It helps to have the per-region information in the output to
3103     // help us track down what went wrong. This is why we call
3104     // print_extended_on() instead of print_on().
3105     LogHandle(gc, verify) log;
3106     ResourceMark rm;
3107     print_extended_on(log.info_stream());
3108   }
3109   guarantee(!failures, "there should not have been any failures");













3110 }
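// To summarize the flow above: roots (system dictionary, class loader data
// graph, string table and the nmethods in the code cache) are verified first,
// then the region sets (skipped under VerifyOption_G1UseMarkWord, i.e. during
// a Full GC), then every heap region, in parallel when
// GCParallelVerificationEnabled and more than one GC thread is available, and
// finally the string deduplication tables if enabled. Any failure triggers an
// extended heap dump and the guarantee fails.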
3111 
3112 double G1CollectedHeap::verify(bool guard, const char* msg) {
3113   double verify_time_ms = 0.0;
3114 
3115   if (guard && total_collections() >= VerifyGCStartAt) {
3116     double verify_start = os::elapsedTime();
3117     HandleMark hm;  // Discard invalid handles created during verification
3118     prepare_for_verify();
3119     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3120     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3121   }
3122 
3123   return verify_time_ms;
3124 }
3125 
3126 void G1CollectedHeap::verify_before_gc() {
3127   double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
3128   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3129 }
3130 
3131 void G1CollectedHeap::verify_after_gc() {
3132   double verify_time_ms = verify(VerifyAfterGC, "After GC");
3133   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3134 }
3135 
3136 class PrintRegionClosure: public HeapRegionClosure {
3137   outputStream* _st;
3138 public:
3139   PrintRegionClosure(outputStream* st) : _st(st) {}
3140   bool doHeapRegion(HeapRegion* r) {
3141     r->print_on(_st);
3142     return false;
3143   }
3144 };
3145 
3146 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3147                                        const HeapRegion* hr,
3148                                        const VerifyOption vo) const {
3149   switch (vo) {
3150   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3151   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3152   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();


3222     G1StringDedup::print_worker_threads_on(st);
3223   }
3224 }
3225 
3226 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3227   workers()->threads_do(tc);
3228   tc->do_thread(_cmThread);
3229   _cg1r->threads_do(tc);
3230   if (G1StringDedup::is_enabled()) {
3231     G1StringDedup::threads_do(tc);
3232   }
3233 }
3234 
3235 void G1CollectedHeap::print_tracing_info() const {
3236   // We'll overload this to mean "trace GC pause statistics."
3237   if (TraceYoungGenTime || TraceOldGenTime) {
3238     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3239     // to that.
3240     g1_policy()->print_tracing_info();
3241   }

3242   g1_rem_set()->print_summary_info();


3243   concurrent_mark()->print_summary_info();

3244   g1_policy()->print_yg_surv_rate_info();
3245 }
3246 
3247 #ifndef PRODUCT
3248 // Helpful for debugging RSet issues.
3249 
3250 class PrintRSetsClosure : public HeapRegionClosure {
3251 private:
3252   const char* _msg;
3253   size_t _occupied_sum;
3254 
3255 public:
3256   bool doHeapRegion(HeapRegion* r) {
3257     HeapRegionRemSet* hrrs = r->rem_set();
3258     size_t occupied = hrrs->occupied();
3259     _occupied_sum += occupied;
3260 
3261     tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));

3262     if (occupied == 0) {
3263       tty->print_cr("  RSet is empty");
3264     } else {
3265       hrrs->print();
3266     }
3267     tty->print_cr("----------");
3268     return false;
3269   }
3270 
3271   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3272     tty->cr();
3273     tty->print_cr("========================================");
3274     tty->print_cr("%s", msg);
3275     tty->cr();
3276   }
3277 
3278   ~PrintRSetsClosure() {
3279     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
3280     tty->print_cr("========================================");
3281     tty->cr();
3282   }
3283 };
3284 
3285 void G1CollectedHeap::print_cset_rsets() {
3286   PrintRSetsClosure cl("Printing CSet RSets");
3287   collection_set_iterate(&cl);
3288 }
3289 
3290 void G1CollectedHeap::print_all_rsets() {
3291   PrintRSetsClosure cl("Printing All RSets");
3292   heap_region_iterate(&cl);
3293 }
3294 #endif // PRODUCT
3295 
3296 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
3297   YoungList* young_list = heap()->young_list();
3298 
3299   size_t eden_used_bytes = young_list->eden_used_bytes();
3300   size_t survivor_used_bytes = young_list->survivor_used_bytes();
3301 


3319 
3320   const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
3321   gc_tracer->report_metaspace_summary(when, metaspace_summary);
3322 }
3323 
3324 
3325 G1CollectedHeap* G1CollectedHeap::heap() {
3326   CollectedHeap* heap = Universe::heap();
3327   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
3328   assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
3329   return (G1CollectedHeap*)heap;
3330 }
3331 
3332 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3333   // always_do_update_barrier = false;
3334   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3335   // Fill TLAB's and such
3336   accumulate_statistics_all_tlabs();
3337   ensure_parsability(true);
3338 
3339   g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());



3340 }
3341 
3342 void G1CollectedHeap::gc_epilogue(bool full) {



3343   // We are at the end of the GC. The total collections counter has already been incremented.
3344   g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);


3345 
3346   // FIXME: what is this about?
3347   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3348   // is set.
3349 #if defined(COMPILER2) || INCLUDE_JVMCI
3350   assert(DerivedPointerTable::is_empty(), "derived pointer present");
3351 #endif
3352   // always_do_update_barrier = true;
3353 
3354   resize_all_tlabs();
3355   allocation_context_stats().update(full);
3356 
3357   // We have just completed a GC. Update the soft reference
3358   // policy with the new heap occupancy
3359   Universe::update_heap_info_at_gc();
3360 }
3361 
3362 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3363                                                uint gc_count_before,
3364                                                bool* succeeded,


3570     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3571 
3572     // Here's a good place to add any other checks we'd like to
3573     // perform on CSet regions.
3574     return false;
3575   }
3576 };
3577 #endif // ASSERT
3578 
3579 uint G1CollectedHeap::num_task_queues() const {
3580   return _task_queues->size();
3581 }
3582 
3583 #if TASKQUEUE_STATS
3584 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3585   st->print_raw_cr("GC Task Stats");
3586   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3587   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3588 }
3589 
3590 void G1CollectedHeap::print_taskqueue_stats() const {
3591   if (!develop_log_is_enabled(Trace, gc, task, stats)) {
3592     return;
3593   }
3594   LogHandle(gc, task, stats) log;
3595   ResourceMark rm;
3596   outputStream* st = log.trace_stream();
3597 
3598   print_taskqueue_stats_hdr(st);
3599 
3600   TaskQueueStats totals;
3601   const uint n = num_task_queues();
3602   for (uint i = 0; i < n; ++i) {
3603     st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
3604     totals += task_queue(i)->stats;
3605   }
3606   st->print_raw("tot "); totals.print(st); st->cr();
3607 
3608   DEBUG_ONLY(totals.verify());
3609 }
3610 
3611 void G1CollectedHeap::reset_taskqueue_stats() {
3612   const uint n = num_task_queues();
3613   for (uint i = 0; i < n; ++i) {
3614     task_queue(i)->stats.reset();
3615   }
3616 }
3617 #endif // TASKQUEUE_STATS
3618 
3619 void G1CollectedHeap::log_gc_footer(double pause_time_counter) {



















3620   if (evacuation_failed()) {
3621     log_info(gc)("To-space exhausted");
3622   }
3623 
3624   double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter);
3625   g1_policy()->print_phases(pause_time_sec);
3626 
3627   g1_policy()->print_detailed_heap_transition();








3628 }
3629 
3630 
3631 void G1CollectedHeap::wait_for_root_region_scanning() {
3632   double scan_wait_start = os::elapsedTime();
3633   // We have to wait until the CM threads finish scanning the
3634   // root regions as it's the only way to ensure that all the
3635   // objects on them have been correctly scanned before we start
3636   // moving them during the GC.
3637   bool waited = _cm->root_regions()->wait_until_scan_finished();
3638   double wait_time_ms = 0.0;
3639   if (waited) {
3640     double scan_wait_end = os::elapsedTime();
3641     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3642   }
3643   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3644 }
3645 
3646 bool
3647 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3648   assert_at_safepoint(true /* should_be_vm_thread */);
3649   guarantee(!is_gc_active(), "collection is not reentrant");
3650 
3651   if (GC_locker::check_active_before_gc()) {
3652     return false;
3653   }
3654 
3655   _gc_timer_stw->register_gc_start();
3656 
3657   GCIdMark gc_id_mark;
3658   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3659 
3660   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3661   ResourceMark rm;
3662 
3663   wait_for_root_region_scanning();
3664 

3665   print_heap_before_gc();
3666   trace_heap_before_gc(_gc_tracer_stw);
3667 
3668   verify_region_sets_optional();
3669   verify_dirty_young_regions();
3670 
3671   // This call will decide whether this pause is an initial-mark
3672   // pause. If it is, during_initial_mark_pause() will return true
3673   // for the duration of this pause.
3674   g1_policy()->decide_on_conc_mark_initiation();
3675 
3676   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3677   assert(!collector_state()->during_initial_mark_pause() ||
3678           collector_state()->gcs_are_young(), "sanity");
3679 
3680   // We also do not allow mixed GCs during marking.
3681   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3682 
3683   // Record whether this pause is an initial mark. By the time the current
3684   // thread has completed its logging output and it is safe to signal
3685   // the CM thread, the flag's value in the policy will have been reset.
3686   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3687 
3688   // Inner scope for scope based logging, timers, and stats collection
3689   {
3690     EvacuationInfo evacuation_info;
3691 
3692     if (collector_state()->during_initial_mark_pause()) {
3693       // We are about to start a marking cycle, so we increment the
3694       // full collection counter.
3695       increment_old_marking_cycles_started();
3696       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3697     }
3698 
3699     _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3700 
3701     GCTraceCPUTime tcpu;
3702 
3703     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3704                                                                   workers()->active_workers(),
3705                                                                   Threads::number_of_non_daemon_threads());
3706     workers()->set_active_workers(active_workers);
3707     FormatBuffer<> gc_string("Pause ");
3708     if (collector_state()->during_initial_mark_pause()) {
3709       gc_string.append("Initial Mark");
3710     } else if (collector_state()->gcs_are_young()) {
3711       gc_string.append("Young");
3712     } else {
3713       gc_string.append("Mixed");
3714     }
3715     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3716 
3717     double pause_start_sec = os::elapsedTime();
3718     double pause_start_counter = os::elapsed_counter();
3719     g1_policy()->note_gc_start(active_workers);

3720 
3721     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3722     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3723 
3724     // If the secondary_free_list is not empty, append it to the
3725     // free_list. No need to wait for the cleanup operation to finish;
3726     // the region allocation code will check the secondary_free_list
3727     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3728     // set, skip this step so that the region allocation code has to
3729     // get entries from the secondary_free_list.
3730     if (!G1StressConcRegionFreeing) {
3731       append_secondary_free_list_if_not_empty_with_lock();
3732     }
3733 
3734     assert(check_young_list_well_formed(), "young list should be well formed");
3735 
3736     // Don't dynamically change the number of GC threads this early.  A value of
3737     // 0 is used to indicate serial work.  When parallel work is done,
3738     // it will be set.
3739 


3757       // reference processing currently works in G1.
3758 
3759       // Enable discovery in the STW reference processor
3760       if (g1_policy()->should_process_references()) {
3761         ref_processor_stw()->enable_discovery();
3762       } else {
3763         ref_processor_stw()->disable_discovery();
3764       }
3765 
3766       {
3767         // We want to temporarily turn off discovery by the
3768         // CM ref processor, if necessary, and turn it back
3769         // on again later if we do. Using a scoped
3770         // NoRefDiscovery object will do this.
3771         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3772 
3773         // Forget the current alloc region (we might even choose it to be part
3774         // of the collection set!).
3775         _allocator->release_mutator_alloc_region();
3776 





3777         // This timing is only used by the ergonomics to handle our pause target.
3778         // It is unclear why this should not include the full pause. We will
3779         // investigate this in CR 7178365.
3780         //
3781         // Preserving the old comment here if that helps the investigation:
3782         //
3783         // The elapsed time induced by the start time below deliberately elides
3784         // the possible verification above.
3785         double sample_start_time_sec = os::elapsedTime();
3786 
3787         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3788 
3789         if (collector_state()->during_initial_mark_pause()) {
3790           concurrent_mark()->checkpointRootsInitialPre();
3791         }
3792 
3793         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3794         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3795 
3796         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());


3880 
3881         if (collector_state()->during_initial_mark_pause()) {
3882           // We have to do this before we notify the CM threads that
3883           // they can start working to make sure that all the
3884           // appropriate initialization is done on the CM object.
3885           concurrent_mark()->checkpointRootsInitialPost();
3886           collector_state()->set_mark_in_progress(true);
3887           // Note that we don't actually trigger the CM thread at
3888           // this point. We do that later when we're sure that
3889           // the current thread has completed its logging output.
3890         }
3891 
3892         allocate_dummy_regions();
3893 
3894         _allocator->init_mutator_alloc_region();
3895 
3896         {
3897           size_t expand_bytes = g1_policy()->expansion_amount();
3898           if (expand_bytes > 0) {
3899             size_t bytes_before = capacity();
3900             // No need for ergo logging here;
3901             // expansion_amount() does this when it returns a value > 0.
3902             double expand_ms;
3903             if (!expand(expand_bytes, &expand_ms)) {
3904               // We failed to expand the heap. Cannot do anything about it.
3905             }
3906             g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3907           }
3908         }
3909 
3910         // We redo the verification, but now with respect to the new CSet,
3911         // which has just been initialized after the previous CSet was freed.
3912         _cm->verify_no_cset_oops();
3913         _cm->note_end_of_gc();
3914 
3915         // This timing is only used by the ergonomics to handle our pause target.
3916         // It is unclear why this should not include the full pause. We will
3917         // investigate this in CR 7178365.
3918         double sample_end_time_sec = os::elapsedTime();
3919         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3920         size_t total_cards_scanned = per_thread_states.total_cards_scanned();


3940         // stamp here we invalidate all the GC time stamps on all the
3941         // regions and saved_mark_word() will simply return top() for
3942         // all the regions. This is a nicer way of ensuring this rather
3943         // than iterating over the regions and fixing them. In fact, the
3944         // GC time stamp increment here also ensures that
3945         // saved_mark_word() will return top() between pauses, i.e.,
3946         // during concurrent refinement. So we don't need the
3947         // is_gc_active() check to decide which top to use when
3948         // scanning cards (see CR 7039627).
3949         increment_gc_time_stamp();
3950 
3951         verify_after_gc();
3952         check_bitmaps("GC End");
3953 
3954         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3955         ref_processor_stw()->verify_no_references_recorded();
3956 
3957         // CM reference discovery will be re-enabled if necessary.
3958       }
3959 






3960 #ifdef TRACESPINNING
3961       ParallelTaskTerminator::print_termination_counts();
3962 #endif
3963 
3964       gc_epilogue(false);
3965     }
3966 
3967     // Print the remainder of the GC log output.
3968     log_gc_footer(os::elapsed_counter() - pause_start_counter);
3969 
3970     // It is not yet safe to tell the concurrent mark thread to
3971     // start as we have some optional output below. We don't want the
3972     // output from the concurrent mark thread interfering with this
3973     // logging output either.
3974 
3975     _hrm.verify_optional();
3976     verify_region_sets_optional();
3977 
3978     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3979     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3980 
3981     print_heap_after_gc();
3982     trace_heap_after_gc(_gc_tracer_stw);
3983 
3984     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3985     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3986     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3987     // before any GC notifications are raised.
3988     g1mm()->update_sizes();
3989 
3990     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3991     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3992     _gc_timer_stw->register_gc_end();
3993     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3994   }
3995   // It should now be safe to tell the concurrent mark thread to start
3996   // without its logging output interfering with the logging output
3997   // that came from the pause.
3998 


4113 
4114       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
4115 
4116       double term_sec = 0.0;
4117       size_t evac_term_attempts = 0;
4118       {
4119         double start = os::elapsedTime();
4120         G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4121         evac.do_void();
4122 
4123         evac_term_attempts = evac.term_attempts();
4124         term_sec = evac.term_time();
4125         double elapsed_sec = os::elapsedTime() - start;
4126         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4127         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4128         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
4129       }
4130 
4131       assert(pss->queue_is_empty(), "should be empty");
4132 
4133       if (log_is_enabled(Debug, gc, task, stats)) {
4134         MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4135         size_t lab_waste;
4136         size_t lab_undo_waste;
4137         pss->waste(lab_waste, lab_undo_waste);
4138         _g1h->print_termination_stats(worker_id,

4139                                       (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
4140                                       strong_roots_sec * 1000.0,                  /* strong roots time */
4141                                       term_sec * 1000.0,                          /* evac term time */
4142                                       evac_term_attempts,                         /* evac term attempts */
4143                                       lab_waste,                                  /* alloc buffer waste */
4144                                       lab_undo_waste                              /* undo waste */
4145                                       );
4146       }
4147 
4148       // Close the inner scope so that the ResourceMark and HandleMark
4149       // destructors are executed here and are included as part of the
4150       // "GC Worker Time".
4151     }
4152     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4153   }
4154 };
4155 
4156 void G1CollectedHeap::print_termination_stats_hdr() {
4157   log_debug(gc, task, stats)("GC Termination Stats");
4158   log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
4159   log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
4160   log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4161 }
4162 
4163 void G1CollectedHeap::print_termination_stats(uint worker_id,

4164                                               double elapsed_ms,
4165                                               double strong_roots_ms,
4166                                               double term_ms,
4167                                               size_t term_attempts,
4168                                               size_t alloc_buffer_waste,
4169                                               size_t undo_waste) const {
4170   log_debug(gc, task, stats)
4171               ("%3d %9.2f %9.2f %6.2f "
4172                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4173                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4174                worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4175                term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4176                (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4177                alloc_buffer_waste * HeapWordSize / K,
4178                undo_waste * HeapWordSize / K);
4179 }
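// Illustrative numbers only: a worker that spent 12.0 ms in total, 6.0 ms in
// strong roots and 1.2 ms terminating is reported as 50.00% roots and 10.00%
// termination; the waste columns are in KiB, i.e. words * HeapWordSize / K,
// so 2048 wasted words with an 8-byte HeapWordSize show up as 16 KiB.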
4180 
4181 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4182 private:
4183   BoolObjectClosure* _is_alive;
4184   int _initial_string_table_size;
4185   int _initial_symbol_table_size;
4186 
4187   bool  _process_strings;
4188   int _strings_processed;
4189   int _strings_removed;
4190 
4191   bool  _process_symbols;


4200     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4201 
4202     _initial_string_table_size = StringTable::the_table()->table_size();
4203     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4204     if (process_strings) {
4205       StringTable::clear_parallel_claimed_index();
4206     }
4207     if (process_symbols) {
4208       SymbolTable::clear_parallel_claimed_index();
4209     }
4210   }
4211 
4212   ~G1StringSymbolTableUnlinkTask() {
4213     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4214               "claim value %d after unlink less than initial string table size %d",
4215               StringTable::parallel_claimed_index(), _initial_string_table_size);
4216     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4217               "claim value %d after unlink less than initial symbol table size %d",
4218               SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
4219 
4220     log_debug(gc, stringdedup)("Cleaned string and symbol table, "

4221                                "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
4222                                "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
4223                                strings_processed(), strings_removed(),
4224                                symbols_processed(), symbols_removed());
4225   }

4226 
4227   void work(uint worker_id) {
4228     int strings_processed = 0;
4229     int strings_removed = 0;
4230     int symbols_processed = 0;
4231     int symbols_removed = 0;
4232     if (_process_strings) {
4233       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4234       Atomic::add(strings_processed, &_strings_processed);
4235       Atomic::add(strings_removed, &_strings_removed);
4236     }
4237     if (_process_symbols) {
4238       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4239       Atomic::add(symbols_processed, &_symbols_processed);
4240       Atomic::add(symbols_removed, &_symbols_removed);
4241     }
4242   }
4243 
4244   size_t strings_processed() const { return (size_t)_strings_processed; }
4245   size_t strings_removed()   const { return (size_t)_strings_removed; }


5044   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5045 }
5046 
5047 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
5048   // Should G1EvacuationFailureALot be in effect for this GC?
5049   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5050 
5051   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5052   double start_par_time_sec = os::elapsedTime();
5053   double end_par_time_sec;
5054 
5055   {
5056     const uint n_workers = workers()->active_workers();
5057     G1RootProcessor root_processor(this, n_workers);
5058     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
5059     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5060     if (collector_state()->during_initial_mark_pause()) {
5061       ClassLoaderDataGraph::clear_claimed_marks();
5062     }
5063 
5064     print_termination_stats_hdr();



5065 
5066     workers()->run_task(&g1_par_task);
5067     end_par_time_sec = os::elapsedTime();
5068 
5069     // Closing the inner scope will execute the destructor
5070     // for the G1RootProcessor object. We record the current
5071     // elapsed time before closing the scope so that time
5072     // taken for the destructor is NOT included in the
5073     // reported parallel time.
5074   }
5075 
5076   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5077 
5078   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5079   phase_times->record_par_time(par_time_ms);
5080 
5081   double code_root_fixup_time_ms =
5082         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5083   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5084 }
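// Illustrative numbers only: if the workers start at t = 10.000 s, finish at
// t = 10.050 s and the G1RootProcessor destructor returns at t = 10.052 s,
// roughly 50.0 ms is recorded as parallel time and 2.0 ms as code root fixup.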


5283   }
5284 }
5285 
5286 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5287   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5288   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5289     verify_dirty_region(hr);
5290   }
5291 }
5292 
5293 void G1CollectedHeap::verify_dirty_young_regions() {
5294   verify_dirty_young_list(_young_list->first_region());
5295 }
5296 
5297 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5298                                                HeapWord* tams, HeapWord* end) {
5299   guarantee(tams <= end,
5300             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5301   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5302   if (result < end) {
5303     log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
5304     log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));



5305     return false;
5306   }
5307   return true;
5308 }
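// Illustrative sketch (standalone, simplified types): the invariant checked
// above is that the bitmap has no set bits in [tams, end), because objects
// allocated at or above TAMS after marking started are treated as implicitly
// live and are never explicitly marked on that bitmap.

#include <cstddef>
#include <vector>

static bool no_marks_at_or_above_tams(const std::vector<bool>& bits,
                                       size_t tams, size_t end) {
  for (size_t i = tams; i < end; i++) {
    if (bits[i]) {
      return false;  // a mark at or above TAMS would violate the invariant
    }
  }
  return true;
}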
5309 
5310 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5311   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5312   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5313 
5314   HeapWord* bottom = hr->bottom();
5315   HeapWord* ptams  = hr->prev_top_at_mark_start();
5316   HeapWord* ntams  = hr->next_top_at_mark_start();
5317   HeapWord* end    = hr->end();
5318 
5319   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5320 
5321   bool res_n = true;
5322   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5323   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5324   // if we happen to be in that state.
5325   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5326     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5327   }
5328   if (!res_p || !res_n) {
5329     log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
5330     log_info(gc, verify)("#### Caller: %s", caller);

5331     return false;
5332   }
5333   return true;
5334 }
5335 
5336 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5337   if (!G1VerifyBitmaps) return;
5338 
5339   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5340 }
5341 
5342 class G1VerifyBitmapClosure : public HeapRegionClosure {
5343 private:
5344   const char* _caller;
5345   G1CollectedHeap* _g1h;
5346   bool _failures;
5347 
5348 public:
5349   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5350     _caller(caller), _g1h(g1h), _failures(false) { }


5362 
5363 void G1CollectedHeap::check_bitmaps(const char* caller) {
5364   if (!G1VerifyBitmaps) return;
5365 
5366   G1VerifyBitmapClosure cl(caller, this);
5367   heap_region_iterate(&cl);
5368   guarantee(!cl.failures(), "bitmap verification");
5369 }
5370 
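     // Verifies that each region's entry in the in_cset_fast_test table is consistent
     // with its actual collection set membership, humongous state and age.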
5371 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5372  private:
5373   bool _failures;
5374  public:
5375   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5376 
5377   virtual bool doHeapRegion(HeapRegion* hr) {
5378     uint i = hr->hrm_index();
5379     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5380     if (hr->is_humongous()) {
5381       if (hr->in_collection_set()) {
5382         log_info(gc, verify)("## humongous region %u in CSet", i);
5383         _failures = true;
5384         return true;
5385       }
5386       if (cset_state.is_in_cset()) {
5387         log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
5388         _failures = true;
5389         return true;
5390       }
5391       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5392         log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
5393         _failures = true;
5394         return true;
5395       }
5396     } else {
5397       if (cset_state.is_humongous()) {
5398         log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
5399         _failures = true;
5400         return true;
5401       }
5402       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5403         log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5404                              hr->in_collection_set(), cset_state.value(), i);
5405         _failures = true;
5406         return true;
5407       }
5408       if (cset_state.is_in_cset()) {
5409         if (hr->is_young() != (cset_state.is_young())) {
5410           log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5411                                hr->is_young(), cset_state.value(), i);
5412           _failures = true;
5413           return true;
5414         }
5415         if (hr->is_old() != (cset_state.is_old())) {
5416           log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5417                                hr->is_old(), cset_state.value(), i);
5418           _failures = true;
5419           return true;
5420         }
5421       }
5422     }
5423     return false;
5424   }
5425 
5426   bool failures() const { return _failures; }
5427 };
5428 
5429 bool G1CollectedHeap::check_cset_fast_test() {
5430   G1CheckCSetFastTableClosure cl;
5431   _hrm.iterate(&cl);
5432   return !cl.failures();
5433 }
5434 #endif // PRODUCT
5435 
5436 void G1CollectedHeap::cleanUpCardTable() {


5614     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5615     // until the end of a concurrent mark.
5616     //
5617     // It is not required to check whether the object has been found dead by marking
5618     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
5619     // all objects allocated during that time are considered live.
5620     // SATB marking is even more conservative than the remembered set.
5621     // So if at this point in the collection there is no remembered set entry,
5622     // nobody has a reference to it.
5623     // At the start of collection we flush all refinement logs, and remembered sets
5624     // are completely up-to-date wrt references to the humongous object.
5625     //
5626     // Other implementation considerations:
5627     // - never consider object arrays at this time because they would require
5628     // considerable effort for cleaning up the remembered sets. This is
5629     // required because stale remembered sets might reference locations that
5630     // are currently allocated into.
5631     uint region_idx = r->hrm_index();
5632     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5633         !r->rem_set()->is_empty()) {
5634       log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",


5635                                region_idx,
5636                                (size_t)obj->size() * HeapWordSize,
5637                                p2i(r->bottom()),
5638                                r->rem_set()->occupied(),
5639                                r->rem_set()->strong_code_roots_list_length(),
5640                                next_bitmap->isMarked(r->bottom()),
5641                                g1h->is_humongous_reclaim_candidate(region_idx),
5642                                obj->is_typeArray()
5643                               );


5644       return false;
5645     }
5646 
5647     guarantee(obj->is_typeArray(),
5648               "Only eagerly reclaiming type arrays is supported, but the object "
5649               PTR_FORMAT " is not.", p2i(r->bottom()));
5650 
5651     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",

5652                              region_idx,
5653                              (size_t)obj->size() * HeapWordSize,
5654                              p2i(r->bottom()),
5655                              r->rem_set()->occupied(),
5656                              r->rem_set()->strong_code_roots_list_length(),
5657                              next_bitmap->isMarked(r->bottom()),
5658                              g1h->is_humongous_reclaim_candidate(region_idx),
5659                              obj->is_typeArray()
5660                             );
5661 
5662     // Need to clear mark bit of the humongous object if already set.
5663     if (next_bitmap->isMarked(r->bottom())) {
5664       next_bitmap->clear(r->bottom());
5665     }
5666     do {
5667       HeapRegion* next = g1h->next_region_in_humongous(r);
5668       _freed_bytes += r->used();
5669       r->set_containing_set(NULL);
5670       _humongous_regions_removed.increment(1u, r->capacity());
5671       g1h->free_humongous_region(r, _free_region_list, false);
5672       r = next;
5673     } while (r != NULL);
5674 
5675     return false;
5676   }
5677 
5678   HeapRegionSetCount& humongous_free_count() {
5679     return _humongous_regions_removed;
5680   }
5681 
5682   size_t bytes_freed() const {
5683     return _freed_bytes;
5684   }
5685 
5686   size_t humongous_reclaimed() const {
5687     return _humongous_regions_removed.length();
5688   }
5689 };
5690 
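     // Free every humongous region that the G1FreeHumongousRegionClosure above identifies
     // as reclaimable. Does nothing unless G1EagerReclaimHumongousObjects is enabled and
     // there are reclaim candidates (or humongous debug logging is enabled).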
5691 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5692   assert_at_safepoint(true);
5693 
5694   if (!G1EagerReclaimHumongousObjects ||
5695       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
5696     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5697     return;
5698   }
5699 
5700   double start_time = os::elapsedTime();
5701 
5702   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5703 
5704   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5705   heap_region_iterate(&cl);
5706 
5707   HeapRegionSetCount empty_set;
5708   remove_from_old_sets(empty_set, cl.humongous_free_count());
5709 
5710   G1HRPrinter* hrp = hr_printer();
5711   if (hrp->is_active()) {
5712     FreeRegionListIterator iter(&local_cleanup_list);
5713     while (iter.more_available()) {
5714       HeapRegion* hr = iter.get_next();
5715       hrp->cleanup(hr);


5728 // the current incremental collection set in preparation for a
5729 // full collection. After the full GC we will start to build up
5730 // the incremental collection set again.
5731 // This is only called when we're doing a full collection
5732 // and is immediately followed by the tearing down of the young list.
5733 
5734 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5735   HeapRegion* cur = cs_head;
5736 
5737   while (cur != NULL) {
5738     HeapRegion* next = cur->next_in_collection_set();
5739     assert(cur->in_collection_set(), "bad CS");
5740     cur->set_next_in_collection_set(NULL);
5741     clear_in_cset(cur);
5742     cur->set_young_index_in_cset(-1);
5743     cur = next;
5744   }
5745 }
5746 
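     // The _free_regions_coming flag is set by the concurrent mark thread while it is
     // still appending freed regions to the secondary free list; other threads can wait
     // for that to finish via wait_while_free_regions_coming() below.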
5747 void G1CollectedHeap::set_free_regions_coming() {
5748   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");



5749 
5750   assert(!free_regions_coming(), "pre-condition");
5751   _free_regions_coming = true;
5752 }
5753 
5754 void G1CollectedHeap::reset_free_regions_coming() {
5755   assert(free_regions_coming(), "pre-condition");
5756 
5757   {
5758     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5759     _free_regions_coming = false;
5760     SecondaryFreeList_lock->notify_all();
5761   }
5762 
5763   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");



5764 }
5765 
5766 void G1CollectedHeap::wait_while_free_regions_coming() {
5767   // Most of the time we won't have to wait, so let's do a quick test
5768   // before we take the lock.
5769   if (!free_regions_coming()) {
5770     return;
5771   }
5772 
5773   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");



5774 
5775   {
5776     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5777     while (free_regions_coming()) {
5778       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5779     }
5780   }
5781 
5782   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");



5783 }
5784 
5785 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5786   return _allocator->is_retained_old_region(hr);
5787 }
5788 
5789 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5790   _young_list->push_region(hr);
5791 }
5792 
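     // Closure that records a failure if any heap region is still tagged as young; used
     // by check_young_list_empty() below.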
5793 class NoYoungRegionsClosure: public HeapRegionClosure {
5794 private:
5795   bool _success;
5796 public:
5797   NoYoungRegionsClosure() : _success(true) { }
5798   bool doHeapRegion(HeapRegion* r) {
5799     if (r->is_young()) {
5800       log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5801                            p2i(r->bottom()), p2i(r->end()));
5802       _success = false;
5803     }
5804     return false;
5805   }
5806   bool success() { return _success; }
5807 };
5808 
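     // Returns true if the young list is empty and, when check_heap is set, no region in
     // the heap is tagged as young.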
5809 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
5810   bool ret = _young_list->check_list_empty(check_sample);
5811 
5812   if (check_heap) {
5813     NoYoungRegionsClosure closure;
5814     heap_region_iterate(&closure);
5815     ret = ret && closure.success();
5816   }
5817 
5818   return ret;
5819 }
5820 


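     // Retire a GC allocation region: note the end of copying, record the bytes copied,
     // and move the region to the survivor list or the old set depending on its
     // destination state.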
6031 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6032                                              size_t allocated_bytes,
6033                                              InCSetState dest) {
6034   bool during_im = collector_state()->during_initial_mark_pause();
6035   alloc_region->note_end_of_copying(during_im);
6036   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6037   if (dest.is_young()) {
6038     young_list()->add_survivor_region(alloc_region);
6039   } else {
6040     _old_set.add(alloc_region);
6041   }
6042   _hr_printer.retire(alloc_region);
6043 }
6044 
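     // Allocate the free region with the highest index, expanding the heap if necessary;
     // returns NULL if no free region is available.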
6045 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6046   bool expanded = false;
6047   uint index = _hrm.find_highest_free(&expanded);
6048 
6049   if (index != G1_NO_HRM_INDEX) {
6050     if (expanded) {
6051       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",



6052                                 HeapRegion::GrainWords * HeapWordSize);
6053     }
6054     _hrm.allocate_free_regions_starting_at(index, 1);
6055     return region_at(index);
6056   }
6057   return NULL;
6058 }
6059 
6060 // Heap region set verification
6061 
6062 class VerifyRegionListsClosure : public HeapRegionClosure {
6063 private:
6064   HeapRegionSet*   _old_set;
6065   HeapRegionSet*   _humongous_set;
6066   HeapRegionManager*   _hrm;
6067 
6068 public:
6069   HeapRegionSetCount _old_count;
6070   HeapRegionSetCount _humongous_count;
6071   HeapRegionSetCount _free_count;

