src/share/vm/gc/g1/g1CollectedHeap.cpp

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentG1Refine.hpp"
  33 #include "gc/g1/concurrentG1RefineThread.hpp"
  34 #include "gc/g1/concurrentMarkThread.inline.hpp"
  35 #include "gc/g1/g1Allocator.inline.hpp"
  36 #include "gc/g1/g1CollectedHeap.inline.hpp"
  37 #include "gc/g1/g1CollectorPolicy.hpp"
  38 #include "gc/g1/g1CollectorState.hpp"
  39 #include "gc/g1/g1EvacStats.inline.hpp"
  40 #include "gc/g1/g1GCPhaseTimes.hpp"

  41 #include "gc/g1/g1MarkSweep.hpp"
  42 #include "gc/g1/g1OopClosures.inline.hpp"
  43 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  44 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  45 #include "gc/g1/g1RemSet.inline.hpp"
  46 #include "gc/g1/g1RootClosures.hpp"
  47 #include "gc/g1/g1RootProcessor.hpp"
  48 #include "gc/g1/g1StringDedup.hpp"
  49 #include "gc/g1/g1YCTypes.hpp"
  50 #include "gc/g1/heapRegion.inline.hpp"
  51 #include "gc/g1/heapRegionRemSet.hpp"
  52 #include "gc/g1/heapRegionSet.inline.hpp"
  53 #include "gc/g1/suspendibleThreadSet.hpp"
  54 #include "gc/g1/vm_operations_g1.hpp"
  55 #include "gc/shared/gcHeapSummary.hpp"
  56 #include "gc/shared/gcId.hpp"
  57 #include "gc/shared/gcLocker.inline.hpp"
  58 #include "gc/shared/gcTimer.hpp"
  59 #include "gc/shared/gcTrace.hpp"
  60 #include "gc/shared/gcTraceTime.inline.hpp"


 382   // no thread sees the update to top before the zeroing of the
 383   // object header and the BOT initialization.
 384   OrderAccess::storestore();
 385 
 386   // Now, we will update the top fields of the "continues humongous"
 387   // regions except the last one.
 388   for (uint i = first; i < last; ++i) {
 389     hr = region_at(i);
 390     hr->set_top(hr->end());
 391   }
 392 
 393   hr = region_at(last);
 394   // If we cannot fit a filler object, we must set top to the end
 395   // of the humongous object, otherwise we cannot iterate the heap
 396   // and the BOT will not be complete.
 397   hr->set_top(hr->end() - words_not_fillable);
 398 
 399   assert(hr->bottom() < obj_top && obj_top <= hr->end(),
 400          "obj_top should be in last region");
 401 
 402   check_bitmaps("Humongous Region Allocation", first_hr);
 403 
 404   assert(words_not_fillable == 0 ||
 405          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
 406          "Miscalculation in humongous allocation");
 407 
 408   increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
 409 
 410   for (uint i = first; i <= last; ++i) {
 411     hr = region_at(i);
 412     _humongous_set.add(hr);
 413     _hr_printer.alloc(hr);
 414   }
 415 
 416   return new_obj;
 417 }
 418 
 419 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
 420   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
 421   return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 422 }
 423 
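// Worked example for the sizing above (the region size is an assumption; G1
// regions range from 1 MB to 32 MB): with 1 MB regions on a 64-bit VM,
// HeapRegion::GrainWords is 1 MB / 8 B = 131072 words. A humongous request of
// 200000 words is rounded up to 262144 words, so
// align_size_up_(200000, 131072) / 131072 = 2 regions. With 2 MB regions
// (262144 grain words) the same request fits in a single region.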
 424 // If the allocation could fit into free regions without expansion, try that.
 425 // Otherwise, if the heap can expand, do so.
 426 // Otherwise, if using ex regions might help, try with the ex regions given back.
 427 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 428   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 429 
 430   verify_region_sets_optional();
 431 
 432   uint first = G1_NO_HRM_INDEX;
 433   uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 434 
 435   if (obj_regions == 1) {
 436     // Only one region to allocate; try to use a fast path by directly allocating
 437     // from the free lists. Do not try to expand here; we will potentially do that
 438     // later.
 439     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 440     if (hr != NULL) {
 441       first = hr->hrm_index();
 442     }
 443   } else {
 444     // We can't allocate humongous regions spanning more than one region while
 445     // cleanupComplete() is running, since some of the regions we find to be
 446     // empty might not yet be added to the free list. It is not straightforward
 447     // to know which list they are on so that we can remove them. We only
 448     // need to do this if we need to allocate more than one region to satisfy the
 449     // current humongous allocation request. If we are only allocating one region
 450     // we use the one-region region allocation code (see above), which already


 484       }
 485 #endif
 486       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 487     } else {
 488       // Policy: Potentially trigger a defragmentation GC.
 489     }
 490   }
 491 
 492   HeapWord* result = NULL;
 493   if (first != G1_NO_HRM_INDEX) {
 494     result = humongous_obj_allocate_initialize_regions(first, obj_regions,
 495                                                        word_size, context);
 496     assert(result != NULL, "it should always return a valid result");
 497 
 498     // A successful humongous object allocation changes the used space
 499     // information of the old generation so we need to recalculate the
 500     // sizes and update the jstat counters here.
 501     g1mm()->update_sizes();
 502   }
 503 
 504   verify_region_sets_optional();
 505 
 506   return result;
 507 }
 508 
 509 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 510   assert_heap_not_locked_and_not_at_safepoint();
 511   assert(!is_humongous(word_size), "we do not allow humongous TLABs");
 512 
 513   uint dummy_gc_count_before;
 514   uint dummy_gclocker_retry_count = 0;
 515   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 516 }
 517 
 518 HeapWord*
 519 G1CollectedHeap::mem_allocate(size_t word_size,
 520                               bool*  gc_overhead_limit_was_exceeded) {
 521   assert_heap_not_locked_and_not_at_safepoint();
 522 
 523   // Loop until the allocation is satisfied, or unsatisfied after GC.
 524   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {


1213 
1214   if (GCLocker::check_active_before_gc()) {
1215     return false;
1216   }
1217 
1218   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1219   gc_timer->register_gc_start();
1220 
1221   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1222   GCIdMark gc_id_mark;
1223   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1224 
1225   SvcGCMarker sgcm(SvcGCMarker::FULL);
1226   ResourceMark rm;
1227 
1228   print_heap_before_gc();
1229   trace_heap_before_gc(gc_tracer);
1230 
1231   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1232 
1233   verify_region_sets_optional();
1234 
1235   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1236                            collector_policy()->should_clear_all_soft_refs();
1237 
1238   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1239 
1240   {
1241     IsGCActiveMark x;
1242 
1243     // Timing
1244     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1245     GCTraceCPUTime tcpu;
1246 
1247     {
1248       GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1249       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1250       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1251 
1252       g1_policy()->record_full_collection_start();
1253 
1254       // Note: When we have a more flexible GC logging framework that
1255       // allows us to add optional attributes to a GC log record we
1256       // could consider timing and reporting how long we wait in the
1257       // following two methods.
1258       wait_while_free_regions_coming();
1259       // If we start the compaction before the CM threads finish
1260       // scanning the root regions we might trip them over as we'll
1261       // be moving objects / updating references. So let's wait until
1262       // they are done. By telling them to abort, they should complete
1263       // early.
1264       _cm->root_regions()->abort();
1265       _cm->root_regions()->wait_until_scan_finished();
1266       append_secondary_free_list_if_not_empty_with_lock();
1267 
1268       gc_prologue(true);
1269       increment_total_collections(true /* full gc */);
1270       increment_old_marking_cycles_started();
1271 
1272       assert(used() == recalculate_used(), "Should be equal");
1273 
1274       verify_before_gc();
1275 
1276       check_bitmaps("Full GC Start");
1277       pre_full_gc_dump(gc_timer);
1278 
1279 #if defined(COMPILER2) || INCLUDE_JVMCI
1280       DerivedPointerTable::clear();
1281 #endif
1282 
1283       // Disable discovery and empty the discovered lists
1284       // for the CM ref processor.
1285       ref_processor_cm()->disable_discovery();
1286       ref_processor_cm()->abandon_partial_discovery();
1287       ref_processor_cm()->verify_no_references_recorded();
1288 
1289       // Abandon current iterations of concurrent marking and concurrent
1290       // refinement, if any are in progress. Note that the root region scan
1291       // has already been aborted and waited for above.
1292       concurrent_mark()->abort();
1293 
1294       // Make sure we'll choose a new allocation region afterwards.
1295       _allocator->release_mutator_alloc_region();
1296       _allocator->abandon_gc_alloc_regions();


1391       }
1392 
1393 #ifdef TRACESPINNING
1394       ParallelTaskTerminator::print_termination_counts();
1395 #endif
1396 
1397       // Discard all rset updates
1398       JavaThread::dirty_card_queue_set().abandon_logs();
1399       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1400 
1401       _young_list->reset_sampled_info();
1402       // At this point there should be no regions in the
1403       // entire heap tagged as young.
1404       assert(check_young_list_empty(true /* check_heap */),
1405              "young list should be empty at this point");
1406 
1407       // Update the number of full collections that have been completed.
1408       increment_old_marking_cycles_completed(false /* concurrent */);
1409 
1410       _hrm.verify_optional();
1411       verify_region_sets_optional();
1412 
1413       verify_after_gc();
1414 
1415       // Clear the previous marking bitmap, if needed for bitmap verification.
1416       // Note we cannot do this when we clear the next marking bitmap in
1417       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1418       // objects marked during a full GC against the previous bitmap.
1419       // But we need to clear it before calling check_bitmaps below since
1420       // the full GC has compacted objects and updated TAMS but not updated
1421       // the prev bitmap.
1422       if (G1VerifyBitmaps) {
1423         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1424       }
1425       check_bitmaps("Full GC End");
1426 
1427       // Start a new incremental collection set for the next pause
1428       assert(g1_policy()->collection_set() == NULL, "must be");
1429       g1_policy()->start_incremental_cset_building();
1430 
1431       clear_cset_fast_test();
1432 
1433       _allocator->init_mutator_alloc_region();
1434 
1435       g1_policy()->record_full_collection_end();
1436 
1437       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1438       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1439       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1440       // before any GC notifications are raised.
1441       g1mm()->update_sizes();
1442 
1443       gc_epilogue(true);
1444     }
1445 


1622 
1623   assert(!collector_policy()->should_clear_all_soft_refs(),
1624          "Flag should have been handled and cleared prior to this point");
1625 
1626   // What else?  We might try synchronous finalization later.  If the total
1627   // space available is large enough for the allocation, then a more
1628   // complete compaction phase than we've tried so far might be
1629   // appropriate.
1630   assert(*succeeded, "sanity");
1631   return NULL;
1632 }
1633 
1634 // Attempt to expand the heap sufficiently
1635 // to support an allocation of the given "word_size".  If
1636 // successful, perform the allocation and return the address of the
1637 // allocated block, or else "NULL".
1638 
1639 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1640   assert_at_safepoint(true /* should_be_vm_thread */);
1641 
1642   verify_region_sets_optional();
1643 
1644   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1645   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1646                             word_size * HeapWordSize);
1647 
1648 
1649   if (expand(expand_bytes)) {
1650     _hrm.verify_optional();
1651     verify_region_sets_optional();
1652     return attempt_allocation_at_safepoint(word_size,
1653                                            context,
1654                                            false /* expect_null_mutator_alloc_region */);
1655   }
1656   return NULL;
1657 }
1658 
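// Worked example for the sizing in expand_and_allocate() and expand() (the
// region size, page size, and MinHeapDeltaBytes value below are assumptions):
// with 1 MB regions, 4 KB pages and a MinHeapDeltaBytes of, say, 128 KB, a
// failed 100-word (800 B) request yields expand_bytes = MAX2(800 B, 128 KB)
// = 128 KB, which expand() rounds up to a page boundary (still 128 KB) and
// then to HeapRegion::GrainBytes, i.e. a single 1 MB region. A failed
// 10M-word (80 MB) request expands by 80 MB, i.e. 80 regions.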
1659 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1660   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1661   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1662                                        HeapRegion::GrainBytes);
1663 
1664   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B aligned expansion amount: " SIZE_FORMAT "B",
1665                             expand_bytes, aligned_expand_bytes);
1666 
1667   if (is_maximal_no_gc()) {
1668     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1669     return false;
1670   }
1671 


1700   size_t aligned_shrink_bytes =
1701     ReservedSpace::page_align_size_down(shrink_bytes);
1702   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1703                                          HeapRegion::GrainBytes);
1704   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1705 
1706   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1707   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1708 
1709 
1710   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1711                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1712   if (num_regions_removed > 0) {
1713     g1_policy()->record_new_heap_size(num_regions());
1714   } else {
1715     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1716   }
1717 }
1718 
1719 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1720   verify_region_sets_optional();
1721 
1722   // We should only reach here at the end of a Full GC, which means we
1723   // should not be holding on to any GC alloc regions. The method
1724   // below will make sure of that and do any remaining clean up.
1725   _allocator->abandon_gc_alloc_regions();
1726 
1727   // Instead of tearing down / rebuilding the free lists here, we
1728   // could instead use the remove_all_pending() method on free_list to
1729   // remove only the ones that we need to remove.
1730   tear_down_region_sets(true /* free_list_only */);
1731   shrink_helper(shrink_bytes);
1732   rebuild_region_sets(true /* free_list_only */);
1733 
1734   _hrm.verify_optional();
1735   verify_region_sets_optional();
1736 }
1737 
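// Worked example for shrink_helper() above (region and page sizes are
// assumptions): with 1 MB regions and 4 KB pages, a request to shrink by
// 3.5 MB is page-aligned down (unchanged) and region-aligned down to 3 MB,
// and num_regions_to_remove is 3. _hrm.shrink_by() may still uncommit fewer
// regions if not enough free regions are available, which is why the log line
// reports the requested, aligned and attempted amounts separately.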
1738 // Public methods.
1739 
1740 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1741   CollectedHeap(),
1742   _g1_policy(policy_),
1743   _dirty_card_queue_set(false),
1744   _is_alive_closure_cm(this),
1745   _is_alive_closure_stw(this),
1746   _ref_processor_cm(NULL),
1747   _ref_processor_stw(NULL),
1748   _bot(NULL),
1749   _cg1r(NULL),
1750   _g1mm(NULL),
1751   _refine_cte_cl(NULL),
1752   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1753   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1754   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1755   _humongous_reclaim_candidates(),


1761   _summary_bytes_used(0),
1762   _survivor_evac_stats(YoungPLABSize, PLABWeight),
1763   _old_evac_stats(OldPLABSize, PLABWeight),
1764   _expand_heap_after_alloc_failure(true),
1765   _old_marking_cycles_started(0),
1766   _old_marking_cycles_completed(0),
1767   _heap_summary_sent(false),
1768   _in_cset_fast_test(),
1769   _dirty_cards_region_list(NULL),
1770   _worker_cset_start_region(NULL),
1771   _worker_cset_start_region_time_stamp(NULL),
1772   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1773   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1774   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1775   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1776 
1777   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1778                           /* are_GC_task_threads */true,
1779                           /* are_ConcurrentGC_threads */false);
1780   _workers->initialize_workers();

1781 
1782   _allocator = G1Allocator::create_allocator(this);
1783   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1784 
1785   // Override the default _filler_array_max_size so that no humongous filler
1786   // objects are created.
1787   _filler_array_max_size = _humongous_object_threshold_in_words;
1788 
1789   uint n_queues = ParallelGCThreads;
1790   _task_queues = new RefToScanQueueSet(n_queues);
1791 
1792   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1793   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1794   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1795 
1796   for (uint i = 0; i < n_queues; i++) {
1797     RefToScanQueue* q = new RefToScanQueue();
1798     q->initialize();
1799     _task_queues->register_queue(i, q);
1800     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();


2650 // must be equal to the humongous object limit.
2651 size_t G1CollectedHeap::max_tlab_size() const {
2652   return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
2653 }
2654 
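// Worked example (the region size is an assumption): G1's humongous threshold
// is half a region, so with 1 MB regions _humongous_object_threshold_in_words
// is 65536 words (512 KB). That value is already a multiple of MinObjAlignment,
// so max_tlab_size() returns 65536 words; any larger allocation has to take
// the humongous path instead of a TLAB.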
2655 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2656   AllocationContext_t context = AllocationContext::current();
2657   return _allocator->unsafe_max_tlab_alloc(context);
2658 }
2659 
2660 size_t G1CollectedHeap::max_capacity() const {
2661   return _hrm.reserved().byte_size();
2662 }
2663 
2664 jlong G1CollectedHeap::millis_since_last_gc() {
2665   // assert(false, "NYI");
2666   return 0;
2667 }
2668 
2669 void G1CollectedHeap::prepare_for_verify() {
2670   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2671     ensure_parsability(false);
2672   }
2673   g1_rem_set()->prepare_for_verify();
2674 }
2675 
2676 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2677                                               VerifyOption vo) {
2678   switch (vo) {
2679   case VerifyOption_G1UsePrevMarking:
2680     return hr->obj_allocated_since_prev_marking(obj);
2681   case VerifyOption_G1UseNextMarking:
2682     return hr->obj_allocated_since_next_marking(obj);
2683   case VerifyOption_G1UseMarkWord:
2684     return false;
2685   default:
2686     ShouldNotReachHere();
2687   }
2688   return false; // keep some compilers happy
2689 }
2690 
2691 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
2692   switch (vo) {
2693   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
2694   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
2695   case VerifyOption_G1UseMarkWord:    return NULL;
2696   default:                            ShouldNotReachHere();
2697   }
2698   return NULL; // keep some compilers happy
2699 }
2700 
2701 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
2702   switch (vo) {
2703   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
2704   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
2705   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
2706   default:                            ShouldNotReachHere();
2707   }
2708   return false; // keep some compilers happy
2709 }
2710 
2711 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
2712   switch (vo) {
2713   case VerifyOption_G1UsePrevMarking: return "PTAMS";
2714   case VerifyOption_G1UseNextMarking: return "NTAMS";
2715   case VerifyOption_G1UseMarkWord:    return "NONE";
2716   default:                            ShouldNotReachHere();
2717   }
2718   return NULL; // keep some compilers happy
2719 }
2720 
2721 class VerifyRootsClosure: public OopClosure {
2722 private:
2723   G1CollectedHeap* _g1h;
2724   VerifyOption     _vo;
2725   bool             _failures;
2726 public:
2727   // _vo == UsePrevMarking -> use "prev" marking information,
2728   // _vo == UseNextMarking -> use "next" marking information,
2729   // _vo == UseMarkWord    -> use mark word from object header.
2730   VerifyRootsClosure(VerifyOption vo) :
2731     _g1h(G1CollectedHeap::heap()),
2732     _vo(vo),
2733     _failures(false) { }
2734 
2735   bool failures() { return _failures; }
2736 
2737   template <class T> void do_oop_nv(T* p) {
2738     T heap_oop = oopDesc::load_heap_oop(p);
2739     if (!oopDesc::is_null(heap_oop)) {
2740       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2741       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2742         LogHandle(gc, verify) log;
2743         log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
2744         if (_vo == VerifyOption_G1UseMarkWord) {
2745           log.info("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
2746         }
2747         ResourceMark rm;
2748         obj->print_on(log.info_stream());
2749         _failures = true;
2750       }
2751     }
2752   }
2753 
2754   void do_oop(oop* p)       { do_oop_nv(p); }
2755   void do_oop(narrowOop* p) { do_oop_nv(p); }
2756 };
2757 
2758 class G1VerifyCodeRootOopClosure: public OopClosure {
2759   G1CollectedHeap* _g1h;
2760   OopClosure* _root_cl;
2761   nmethod* _nm;
2762   VerifyOption _vo;
2763   bool _failures;
2764 
2765   template <class T> void do_oop_work(T* p) {
2766     // First verify that this root is live
2767     _root_cl->do_oop(p);
2768 
2769     if (!G1VerifyHeapRegionCodeRoots) {
2770       // We're not verifying the code roots attached to heap regions.
2771       return;
2772     }
2773 
2774     // Don't check the code roots during marking verification in a full GC
2775     if (_vo == VerifyOption_G1UseMarkWord) {
2776       return;
2777     }
2778 
2779     // Now verify that the current nmethod (which contains p) is
2780     // in the code root list of the heap region containing the
2781     // object referenced by p.
2782 
2783     T heap_oop = oopDesc::load_heap_oop(p);
2784     if (!oopDesc::is_null(heap_oop)) {
2785       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2786 
2787       // Now fetch the region containing the object
2788       HeapRegion* hr = _g1h->heap_region_containing(obj);
2789       HeapRegionRemSet* hrrs = hr->rem_set();
2790       // Verify that the strong code root list for this region
2791       // contains the nmethod
2792       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2793         log_info(gc, verify)("Code root location " PTR_FORMAT " "
2794                              "from nmethod " PTR_FORMAT " not in strong "
2795                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
2796                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
2797         _failures = true;
2798       }
2799     }
2800   }
2801 
2802 public:
2803   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2804     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2805 
2806   void do_oop(oop* p) { do_oop_work(p); }
2807   void do_oop(narrowOop* p) { do_oop_work(p); }
2808 
2809   void set_nmethod(nmethod* nm) { _nm = nm; }
2810   bool failures() { return _failures; }
2811 };
2812 
2813 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2814   G1VerifyCodeRootOopClosure* _oop_cl;
2815 
2816 public:
2817   G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
2818     _oop_cl(oop_cl) {}
2819 
2820   void do_code_blob(CodeBlob* cb) {
2821     nmethod* nm = cb->as_nmethod_or_null();
2822     if (nm != NULL) {
2823       _oop_cl->set_nmethod(nm);
2824       nm->oops_do(_oop_cl);
2825     }
2826   }
2827 };
2828 
2829 class YoungRefCounterClosure : public OopClosure {
2830   G1CollectedHeap* _g1h;
2831   int              _count;
2832  public:
2833   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
2834   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
2835   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2836 
2837   int count() { return _count; }
2838   void reset_count() { _count = 0; };
2839 };
2840 
2841 class VerifyKlassClosure: public KlassClosure {
2842   YoungRefCounterClosure _young_ref_counter_closure;
2843   OopClosure *_oop_closure;
2844  public:
2845   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
2846   void do_klass(Klass* k) {
2847     k->oops_do(_oop_closure);
2848 
2849     _young_ref_counter_closure.reset_count();
2850     k->oops_do(&_young_ref_counter_closure);
2851     if (_young_ref_counter_closure.count() > 0) {
2852       guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT " has young refs but is not dirty.", p2i(k));
2853     }
2854   }
2855 };
2856 
2857 class VerifyLivenessOopClosure: public OopClosure {
2858   G1CollectedHeap* _g1h;
2859   VerifyOption _vo;
2860 public:
2861   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
2862     _g1h(g1h), _vo(vo)
2863   { }
2864   void do_oop(narrowOop *p) { do_oop_work(p); }
2865   void do_oop(      oop *p) { do_oop_work(p); }
2866 
2867   template <class T> void do_oop_work(T *p) {
2868     oop obj = oopDesc::load_decode_heap_oop(p);
2869     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
2870               "Dead object referenced by a not dead object");
2871   }
2872 };
2873 
2874 class VerifyObjsInRegionClosure: public ObjectClosure {
2875 private:
2876   G1CollectedHeap* _g1h;
2877   size_t _live_bytes;
2878   HeapRegion *_hr;
2879   VerifyOption _vo;
2880 public:
2881   // _vo == UsePrevMarking -> use "prev" marking information,
2882   // _vo == UseNextMarking -> use "next" marking information,
2883   // _vo == UseMarkWord    -> use mark word from object header.
2884   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
2885     : _live_bytes(0), _hr(hr), _vo(vo) {
2886     _g1h = G1CollectedHeap::heap();
2887   }
2888   void do_object(oop o) {
2889     VerifyLivenessOopClosure isLive(_g1h, _vo);
2890     assert(o != NULL, "Huh?");
2891     if (!_g1h->is_obj_dead_cond(o, _vo)) {
2892       // If the object is alive according to the mark word,
2893       // then verify that the marking information agrees.
2894       // Note we can't verify the contra-positive of the
2895       // above: if the object is dead (according to the mark
2896       // word), it may not be marked, or may have been marked
2897       // but has since become dead, or may have been allocated
2898       // since the last marking.
2899       if (_vo == VerifyOption_G1UseMarkWord) {
2900         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
2901       }
2902 
2903       o->oop_iterate_no_header(&isLive);
2904       if (!_hr->obj_allocated_since_prev_marking(o)) {
2905         size_t obj_size = o->size();    // Make sure we don't overflow
2906         _live_bytes += (obj_size * HeapWordSize);
2907       }
2908     }
2909   }
2910   size_t live_bytes() { return _live_bytes; }
2911 };
2912 
2913 class VerifyArchiveOopClosure: public OopClosure {
2914 public:
2915   VerifyArchiveOopClosure(HeapRegion *hr) { }
2916   void do_oop(narrowOop *p) { do_oop_work(p); }
2917   void do_oop(      oop *p) { do_oop_work(p); }
2918 
2919   template <class T> void do_oop_work(T *p) {
2920     oop obj = oopDesc::load_decode_heap_oop(p);
2921     guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
2922               "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
2923               p2i(p), p2i(obj));
2924   }
2925 };
2926 
2927 class VerifyArchiveRegionClosure: public ObjectClosure {
2928 public:
2929   VerifyArchiveRegionClosure(HeapRegion *hr) { }
2930   // Verify that all object pointers are to archive regions.
2931   void do_object(oop o) {
2932     VerifyArchiveOopClosure checkOop(NULL);
2933     assert(o != NULL, "Should not be here for NULL oops");
2934     o->oop_iterate_no_header(&checkOop);
2935   }
2936 };
2937 
2938 class VerifyRegionClosure: public HeapRegionClosure {
2939 private:
2940   bool             _par;
2941   VerifyOption     _vo;
2942   bool             _failures;
2943 public:
2944   // _vo == UsePrevMarking -> use "prev" marking information,
2945   // _vo == UseNextMarking -> use "next" marking information,
2946   // _vo == UseMarkWord    -> use mark word from object header.
2947   VerifyRegionClosure(bool par, VerifyOption vo)
2948     : _par(par),
2949       _vo(vo),
2950       _failures(false) {}
2951 
2952   bool failures() {
2953     return _failures;
2954   }
2955 
2956   bool doHeapRegion(HeapRegion* r) {
2957     // For archive regions, verify there are no heap pointers to
2958     // non-pinned regions. For all others, verify liveness info.
2959     if (r->is_archive()) {
2960       VerifyArchiveRegionClosure verify_oop_pointers(r);
2961       r->object_iterate(&verify_oop_pointers);
2962       return true;
2963     }
2964     if (!r->is_continues_humongous()) {
2965       bool failures = false;
2966       r->verify(_vo, &failures);
2967       if (failures) {
2968         _failures = true;
2969       } else if (!r->is_starts_humongous()) {
2970         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2971         r->object_iterate(&not_dead_yet_cl);
2972         if (_vo != VerifyOption_G1UseNextMarking) {
2973           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2974             log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
2975                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
2976             _failures = true;
2977           }
2978         } else {
2979           // When vo == UseNextMarking we cannot currently do a sanity
2980           // check on the live bytes as the calculation has not been
2981           // finalized yet.
2982         }
2983       }
2984     }
2985     return false; // continue the region iteration; any failure has already been recorded
2986   }
2987 };
2988 
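// The closures in this file follow HeapRegionClosure's convention that
// doHeapRegion() returns true to terminate the iteration early and false to
// keep visiting regions; VerifyRegionClosure above records failures and keeps
// going. A minimal standalone sketch of that convention (illustrative types
// only, not HotSpot's; kept out of the build with #if 0):
#if 0
#include <cstdio>
#include <vector>

struct Region { bool bad; };

struct RegionClosure {
  bool failures = false;
  // Returns true to stop the iteration early, false to continue.
  bool do_region(const Region& r) {
    if (r.bad) {
      failures = true;   // record the failure...
    }
    return false;        // ...but keep visiting the remaining regions
  }
};

static void iterate(const std::vector<Region>& heap, RegionClosure& cl) {
  for (const Region& r : heap) {
    if (cl.do_region(r)) {
      return;            // the closure asked to terminate early
    }
  }
}

int main() {
  std::vector<Region> heap = { {false}, {true}, {false} };
  RegionClosure cl;
  iterate(heap, cl);
  std::printf("failures: %d\n", cl.failures ? 1 : 0); // prints 1; all regions were visited
  return 0;
}
#endif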
2989 // This is the task used for parallel verification of the heap regions
2990 
2991 class G1ParVerifyTask: public AbstractGangTask {
2992 private:
2993   G1CollectedHeap*  _g1h;
2994   VerifyOption      _vo;
2995   bool              _failures;
2996   HeapRegionClaimer _hrclaimer;
2997 
2998 public:
2999   // _vo == UsePrevMarking -> use "prev" marking information,
3000   // _vo == UseNextMarking -> use "next" marking information,
3001   // _vo == UseMarkWord    -> use mark word from object header.
3002   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3003       AbstractGangTask("Parallel verify task"),
3004       _g1h(g1h),
3005       _vo(vo),
3006       _failures(false),
3007       _hrclaimer(g1h->workers()->active_workers()) {}
3008 
3009   bool failures() {
3010     return _failures;
3011   }
3012 
3013   void work(uint worker_id) {
3014     HandleMark hm;
3015     VerifyRegionClosure blk(true, _vo);
3016     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3017     if (blk.failures()) {
3018       _failures = true;
3019     }
3020   }
3021 };
3022 
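// G1ParVerifyTask above relies on HeapRegionClaimer so that concurrent workers
// each verify a disjoint subset of regions. A minimal standalone sketch of
// that idea, using a shared atomic counter instead of HotSpot's per-region
// claim values (illustrative only; kept out of the build with #if 0):
#if 0
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const int num_regions = 1000;
  const int num_workers = 4;
  std::atomic<int> next_region{0};   // shared claim counter
  std::atomic<int> verified{0};

  auto work = [&]() {
    for (;;) {
      int i = next_region.fetch_add(1, std::memory_order_relaxed);
      if (i >= num_regions) {
        return;                      // nothing left to claim
      }
      // This worker now owns region i and "verifies" it exactly once.
      verified.fetch_add(1, std::memory_order_relaxed);
    }
  };

  std::vector<std::thread> workers;
  for (int w = 0; w < num_workers; ++w) {
    workers.emplace_back(work);
  }
  for (std::thread& t : workers) {
    t.join();
  }
  std::printf("verified %d of %d regions\n", verified.load(), num_regions);
  return 0;
}
#endif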
3023 void G1CollectedHeap::verify(VerifyOption vo) {
3024   if (!SafepointSynchronize::is_at_safepoint()) {
3025     log_info(gc, verify)("Skipping verification. Not at safepoint.");
3026   }
3027 
3028   assert(Thread::current()->is_VM_thread(),
3029          "Expected to be executed serially by the VM thread at this point");
3030 
3031   log_debug(gc, verify)("Roots");
3032   VerifyRootsClosure rootsCl(vo);
3033   VerifyKlassClosure klassCl(this, &rootsCl);
3034   CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3035 
3036   // We apply the relevant closures to all the oops in the
3037   // system dictionary, class loader data graph, the string table
3038   // and the nmethods in the code cache.
3039   G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3040   G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3041 
3042   {
3043     G1RootProcessor root_processor(this, 1);
3044     root_processor.process_all_roots(&rootsCl,
3045                                      &cldCl,
3046                                      &blobsCl);
3047   }
3048 
3049   bool failures = rootsCl.failures() || codeRootsCl.failures();
3050 
3051   if (vo != VerifyOption_G1UseMarkWord) {
3052     // If we're verifying during a full GC then the region sets
3053     // will have been torn down at the start of the GC. Therefore
3054     // verifying the region sets will fail. So we only verify
3055     // the region sets when not in a full GC.
3056     log_debug(gc, verify)("HeapRegionSets");
3057     verify_region_sets();
3058   }
3059 
3060   log_debug(gc, verify)("HeapRegions");
3061   if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3062 
3063     G1ParVerifyTask task(this, vo);
3064     workers()->run_task(&task);
3065     if (task.failures()) {
3066       failures = true;
3067     }
3068 
3069   } else {
3070     VerifyRegionClosure blk(false, vo);
3071     heap_region_iterate(&blk);
3072     if (blk.failures()) {
3073       failures = true;
3074     }
3075   }
3076 
3077   if (G1StringDedup::is_enabled()) {
3078     log_debug(gc, verify)("StrDedup");
3079     G1StringDedup::verify();
3080   }
3081 
3082   if (failures) {
3083     log_info(gc, verify)("Heap after failed verification:");
3084     // It is useful to have the per-region information in the output to
3085     // help us track down what went wrong. This is why we call
3086     // print_extended_on() instead of print_on().
3087     LogHandle(gc, verify) log;
3088     ResourceMark rm;
3089     print_extended_on(log.info_stream());
3090   }
3091   guarantee(!failures, "there should not have been any failures");
3092 }
3093 
3094 double G1CollectedHeap::verify(bool guard, const char* msg) {
3095   double verify_time_ms = 0.0;
3096 
3097   if (guard && total_collections() >= VerifyGCStartAt) {
3098     double verify_start = os::elapsedTime();
3099     HandleMark hm;  // Discard invalid handles created during verification
3100     prepare_for_verify();
3101     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3102     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3103   }
3104 
3105   return verify_time_ms;
3106 }
3107 
3108 void G1CollectedHeap::verify_before_gc() {
3109   double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
3110   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3111 }
3112 
3113 void G1CollectedHeap::verify_after_gc() {
3114   double verify_time_ms = verify(VerifyAfterGC, "After GC");
3115   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3116 }
3117 
3118 class PrintRegionClosure: public HeapRegionClosure {
3119   outputStream* _st;
3120 public:
3121   PrintRegionClosure(outputStream* st) : _st(st) {}
3122   bool doHeapRegion(HeapRegion* r) {
3123     r->print_on(_st);
3124     return false;
3125   }
3126 };
3127 
3128 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3129                                        const HeapRegion* hr,
3130                                        const VerifyOption vo) const {
3131   switch (vo) {
3132   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3133   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3134   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
3135   default:                            ShouldNotReachHere();


3640   assert_at_safepoint(true /* should_be_vm_thread */);
3641   guarantee(!is_gc_active(), "collection is not reentrant");
3642 
3643   if (GCLocker::check_active_before_gc()) {
3644     return false;
3645   }
3646 
3647   _gc_timer_stw->register_gc_start();
3648 
3649   GCIdMark gc_id_mark;
3650   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3651 
3652   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3653   ResourceMark rm;
3654 
3655   wait_for_root_region_scanning();
3656 
3657   print_heap_before_gc();
3658   trace_heap_before_gc(_gc_tracer_stw);
3659 
3660   verify_region_sets_optional();
3661   verify_dirty_young_regions();
3662 
3663   // This call will decide whether this pause is an initial-mark
3664   // pause. If it is, during_initial_mark_pause() will return true
3665   // for the duration of this pause.
3666   g1_policy()->decide_on_conc_mark_initiation();
3667 
3668   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3669   assert(!collector_state()->during_initial_mark_pause() ||
3670           collector_state()->gcs_are_young(), "sanity");
3671 
3672   // We also do not allow mixed GCs during marking.
3673   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3674 
3675   // Record whether this pause is an initial mark. When the current
3676   // thread has completed its logging output and it's safe to signal
3677   // the CM thread, the flag's value in the policy has been reset.
3678   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3679 
3680   // Inner scope for scope based logging, timers, and stats collection
3681   {


3724 
3725     assert(check_young_list_well_formed(), "young list should be well formed");
3726 
3727     // Don't dynamically change the number of GC threads this early.  A value of
3728     // 0 is used to indicate serial work.  When parallel work is done,
3729     // it will be set.
3730 
3731     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3732       IsGCActiveMark x;
3733 
3734       gc_prologue(false);
3735       increment_total_collections(false /* full gc */);
3736       increment_gc_time_stamp();
3737 
3738       if (VerifyRememberedSets) {
3739         log_info(gc, verify)("[Verifying RemSets before GC]");
3740         VerifyRegionRemSetClosure v_cl;
3741         heap_region_iterate(&v_cl);
3742       }
3743 
3744       verify_before_gc();
3745 
3746       check_bitmaps("GC Start");
3747 
3748 #if defined(COMPILER2) || INCLUDE_JVMCI
3749       DerivedPointerTable::clear();
3750 #endif
3751 
3752       // Please see comment in g1CollectedHeap.hpp and
3753       // G1CollectedHeap::ref_processing_init() to see how
3754       // reference processing currently works in G1.
3755 
3756       // Enable discovery in the STW reference processor
3757       if (g1_policy()->should_process_references()) {
3758         ref_processor_stw()->enable_discovery();
3759       } else {
3760         ref_processor_stw()->disable_discovery();
3761       }
3762 
3763       {
3764         // We want to temporarily turn off discovery by the
3765         // CM ref processor, if necessary, and turn it back on
3766         // again later if we do. Using a scoped


3784         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3785 
3786         if (collector_state()->during_initial_mark_pause()) {
3787           concurrent_mark()->checkpointRootsInitialPre();
3788         }
3789 
3790         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3791         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3792 
3793         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
3794 
3795         // Make sure the remembered sets are up to date. This needs to be
3796         // done before register_humongous_regions_with_cset(), because the
3797         // remembered sets are used there to choose eager reclaim candidates.
3798         // If the remembered sets are not up to date we might miss some
3799         // entries that need to be handled.
3800         g1_rem_set()->cleanupHRRS();
3801 
3802         register_humongous_regions_with_cset();
3803 
3804         assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3805 
3806         _cm->note_start_of_gc();
3807         // We call this after finalize_cset() to
3808         // ensure that the CSet has been finalized.
3809         _cm->verify_no_cset_oops();
3810 
3811         if (_hr_printer.is_active()) {
3812           HeapRegion* hr = g1_policy()->collection_set();
3813           while (hr != NULL) {
3814             _hr_printer.cset(hr);
3815             hr = hr->next_in_collection_set();
3816           }
3817         }
3818 
3819 #ifdef ASSERT
3820         VerifyCSetClosure cl;
3821         collection_set_iterate(&cl);
3822 #endif // ASSERT
3823 
3824         // Initialize the GC alloc regions.


3934         // during the last GC). But it shouldn't. Given that
3935         // saved_mark_word() is conditional on whether the GC time stamp
3936         // on the region is current or not, by incrementing the GC time
3937         // stamp here we invalidate all the GC time stamps on all the
3938         // regions and saved_mark_word() will simply return top() for
3939         // all the regions. This is a nicer way of ensuring this rather
3940         // than iterating over the regions and fixing them. In fact, the
3941         // GC time stamp increment here also ensures that
3942         // saved_mark_word() will return top() between pauses, i.e.,
3943         // during concurrent refinement. So we don't need the
3944         // is_gc_active() check to decide which top to use when
3945         // scanning cards (see CR 7039627).
3946         increment_gc_time_stamp();
3947 
3948         if (VerifyRememberedSets) {
3949           log_info(gc, verify)("[Verifying RemSets after GC]");
3950           VerifyRegionRemSetClosure v_cl;
3951           heap_region_iterate(&v_cl);
3952         }
3953 
3954         verify_after_gc();
3955         check_bitmaps("GC End");
3956 
3957         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3958         ref_processor_stw()->verify_no_references_recorded();
3959 
3960         // CM reference discovery will be re-enabled if necessary.
3961       }
3962 
3963 #ifdef TRACESPINNING
3964       ParallelTaskTerminator::print_termination_counts();
3965 #endif
3966 
3967       gc_epilogue(false);
3968     }
3969 
3970     // Print the remainder of the GC log output.
3971     log_gc_footer(os::elapsed_counter() - pause_start_counter);
3972 
3973     // It is not yet safe to tell the concurrent mark to
3974     // start as we have some optional output below. We don't want the
3975     // output from the concurrent mark thread interfering with this
3976     // logging output either.
3977 
3978     _hrm.verify_optional();
3979     verify_region_sets_optional();
3980 
3981     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3982     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3983 
3984     print_heap_after_gc();
3985     trace_heap_after_gc(_gc_tracer_stw);
3986 
3987     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3988     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3989     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3990     // before any GC notifications are raised.
3991     g1mm()->update_sizes();
3992 
3993     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3994     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3995     _gc_timer_stw->register_gc_end();
3996     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3997   }
3998   // It should now be safe to tell the concurrent mark thread to start
3999   // without its logging output interfering with the logging output


5228   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
5229                      G1CollectedHeap* g1h) :
5230     AbstractGangTask("G1 Par Cleanup CT Task"),
5231     _ct_bs(ct_bs), _g1h(g1h) { }
5232 
5233   void work(uint worker_id) {
5234     HeapRegion* r;
5235     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
5236       clear_cards(r);
5237     }
5238   }
5239 
5240   void clear_cards(HeapRegion* r) {
5241     // Cards of the survivors should have already been dirtied.
5242     if (!r->is_survivor()) {
5243       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5244     }
5245   }
5246 };
5247 
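// Sizing note for the cleanup above (the region size is an assumption; the
// usual card size is 512 bytes): clearing MemRegion(bottom, end) for one
// non-survivor 1 MB region resets 1 MB / 512 B = 2048 card-table entries.
// Survivor regions are skipped because, as the comment above notes, their
// cards have already been dirtied deliberately and should stay that way.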
5248 #ifndef PRODUCT
5249 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5250   G1CollectedHeap* _g1h;
5251   G1SATBCardTableModRefBS* _ct_bs;
5252 public:
5253   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
5254     : _g1h(g1h), _ct_bs(ct_bs) { }
5255   virtual bool doHeapRegion(HeapRegion* r) {
5256     if (r->is_survivor()) {
5257       _g1h->verify_dirty_region(r);
5258     } else {
5259       _g1h->verify_not_dirty_region(r);
5260     }
5261     return false;
5262   }
5263 };
5264 
5265 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5266   // All of the region should be clean.
5267   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5268   MemRegion mr(hr->bottom(), hr->end());
5269   ct_bs->verify_not_dirty_region(mr);
5270 }
5271 
5272 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5273   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
5274   // dirty allocated blocks as they allocate them. The thread that
5275   // retires each region and replaces it with a new one will do a
5276   // maximal allocation to fill in [pre_dummy_top(),end()] but will
5277   // not dirty that area (one less thing to have to do while holding
5278   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5279   // is dirty.
5280   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5281   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5282   if (hr->is_young()) {
5283     ct_bs->verify_g1_young_region(mr);
5284   } else {
5285     ct_bs->verify_dirty_region(mr);
5286   }
5287 }
5288 
5289 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5290   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5291   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5292     verify_dirty_region(hr);
5293   }
5294 }
5295 
5296 void G1CollectedHeap::verify_dirty_young_regions() {
5297   verify_dirty_young_list(_young_list->first_region());
5298 }
5299 
5300 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5301                                                HeapWord* tams, HeapWord* end) {
5302   guarantee(tams <= end,
5303             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
5304   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5305   if (result < end) {
5306     log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
5307     log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
5308     return false;
5309   }
5310   return true;
5311 }
5312 
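// In G1, objects allocated at or above a region's TAMS (top-at-mark-start) are
// considered implicitly live, so the marking bitmaps must never have bits set
// in [tams, end); that is the invariant verify_no_bits_over_tams() checks. A
// minimal standalone sketch of the check, with a std::bitset standing in for
// the marking bitmap (illustrative only; kept out of the build with #if 0):
#if 0
#include <bitset>
#include <cassert>
#include <cstddef>

// Returns true iff no bit is set in [tams, end).
static bool no_bits_at_or_above(const std::bitset<64>& bm, std::size_t tams, std::size_t end) {
  for (std::size_t i = tams; i < end; ++i) {
    if (bm.test(i)) {
      return false;                  // a mark at or above TAMS is a verification failure
    }
  }
  return true;
}

int main() {
  std::bitset<64> bitmap;
  bitmap.set(3);                                  // marked below TAMS: allowed
  assert(no_bits_at_or_above(bitmap, 10, 64));
  bitmap.set(12);                                 // marked at or above TAMS: violation
  assert(!no_bits_at_or_above(bitmap, 10, 64));
  return 0;
}
#endif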
5313 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5314   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5315   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5316 
5317   HeapWord* bottom = hr->bottom();
5318   HeapWord* ptams  = hr->prev_top_at_mark_start();
5319   HeapWord* ntams  = hr->next_top_at_mark_start();
5320   HeapWord* end    = hr->end();
5321 
5322   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5323 
5324   bool res_n = true;
5325   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5326   // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
5327   // if we happen to be in that state.
5328   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5329     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5330   }
5331   if (!res_p || !res_n) {
5332     log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
5333     log_info(gc, verify)("#### Caller: %s", caller);
5334     return false;
5335   }
5336   return true;
5337 }
5338 
5339 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5340   if (!G1VerifyBitmaps) return;
5341 
5342   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5343 }
5344 
5345 class G1VerifyBitmapClosure : public HeapRegionClosure {
5346 private:
5347   const char* _caller;
5348   G1CollectedHeap* _g1h;
5349   bool _failures;
5350 
5351 public:
5352   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5353     _caller(caller), _g1h(g1h), _failures(false) { }
5354 
5355   bool failures() { return _failures; }
5356 
5357   virtual bool doHeapRegion(HeapRegion* hr) {
5358     bool result = _g1h->verify_bitmaps(_caller, hr);
5359     if (!result) {
5360       _failures = true;
5361     }
5362     return false;
5363   }
5364 };
5365 
5366 void G1CollectedHeap::check_bitmaps(const char* caller) {
5367   if (!G1VerifyBitmaps) return;
5368 
5369   G1VerifyBitmapClosure cl(caller, this);
5370   heap_region_iterate(&cl);
5371   guarantee(!cl.failures(), "bitmap verification");
5372 }
5373 
5374 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5375  private:
5376   bool _failures;
5377  public:
5378   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5379 
5380   virtual bool doHeapRegion(HeapRegion* hr) {
5381     uint i = hr->hrm_index();
5382     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5383     if (hr->is_humongous()) {
5384       if (hr->in_collection_set()) {
5385         log_info(gc, verify)("## humongous region %u in CSet", i);
5386         _failures = true;
5387         return true;
5388       }
5389       if (cset_state.is_in_cset()) {
5390         log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
5391         _failures = true;
5392         return true;
5393       }
5394       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5395         log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
5396         _failures = true;
5397         return true;
5398       }
5399     } else {
5400       if (cset_state.is_humongous()) {
5401         log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
5402         _failures = true;
5403         return true;
5404       }
5405       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5406         log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5407                              hr->in_collection_set(), cset_state.value(), i);
5408         _failures = true;
5409         return true;
5410       }
5411       if (cset_state.is_in_cset()) {
5412         if (hr->is_young() != (cset_state.is_young())) {
5413           log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5414                                hr->is_young(), cset_state.value(), i);
5415           _failures = true;
5416           return true;
5417         }
5418         if (hr->is_old() != (cset_state.is_old())) {
5419           log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
5420                                hr->is_old(), cset_state.value(), i);
5421           _failures = true;
5422           return true;
5423         }
5424       }
5425     }
5426     return false;
5427   }
5428 
5429   bool failures() const { return _failures; }
5430 };
5431 
5432 bool G1CollectedHeap::check_cset_fast_test() {
5433   G1CheckCSetFastTableClosure cl;
5434   _hrm.iterate(&cl);
5435   return !cl.failures();
5436 }
5437 #endif // PRODUCT
5438 
5439 class G1ParScrubRemSetTask: public AbstractGangTask {
5440 protected:
5441   G1RemSet* _g1rs;
5442   BitMap* _region_bm;
5443   BitMap* _card_bm;
5444   HeapRegionClaimer _hrclaimer;
5445 
5446 public:
5447   G1ParScrubRemSetTask(G1RemSet* g1_rs, BitMap* region_bm, BitMap* card_bm, uint num_workers) :
5448     AbstractGangTask("G1 ScrubRS"),
5449     _g1rs(g1_rs),
5450     _region_bm(region_bm),
5451     _card_bm(card_bm),
5452     _hrclaimer(num_workers) {
5453   }
5454 
5455   void work(uint worker_id) {
5456     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
5457   }
5458 };
5459 
5460 void G1CollectedHeap::scrub_rem_set(BitMap* region_bm, BitMap* card_bm) {
5461   uint num_workers = workers()->active_workers();
5462   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), region_bm, card_bm, num_workers);
5463   workers()->run_task(&g1_par_scrub_rs_task);
5464 }
5465 
5466 void G1CollectedHeap::cleanUpCardTable() {
5467   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5468   double start = os::elapsedTime();
5469 
5470   {
5471     // Iterate over the dirty cards region list.
5472     G1ParCleanupCTTask cleanup_task(ct_bs, this);
5473 
5474     workers()->run_task(&cleanup_task);
5475 #ifndef PRODUCT
5476     if (G1VerifyCTCleanup || VerifyAfterGC) {
5477       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5478       heap_region_iterate(&cleanup_verifier);
5479     }
5480 #endif
5481   }
5482 
5483   double elapsed = os::elapsedTime() - start;
5484   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
5485 }
5486 
5487 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
5488   size_t pre_used = 0;
5489   FreeRegionList local_free_list("Local List for CSet Freeing");
5490 
5491   double young_time_ms     = 0.0;
5492   double non_young_time_ms = 0.0;
5493 
5494   // Since the collection set is a superset of the young list,
5495   // all we need to do to clear the young list is clear its
5496   // head and length, and unlink any young regions in the code below.
5497   _young_list->clear();
5498 
5499   G1CollectorPolicy* policy = g1_policy();


5981 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5982   HeapRegion* hr = heap_region_containing(p);
5983   return hr->is_in(p);
5984 }
5985 
5986 // Methods for the mutator alloc region
5987 
5988 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5989                                                       bool force) {
5990   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5991   assert(!force || g1_policy()->can_expand_young_list(),
5992          "if force is true we should be able to expand the young list");
5993   bool young_list_full = g1_policy()->is_young_list_full();
5994   if (force || !young_list_full) {
5995     HeapRegion* new_alloc_region = new_region(word_size,
5996                                               false /* is_old */,
5997                                               false /* do_expand */);
5998     if (new_alloc_region != NULL) {
5999       set_region_short_lived_locked(new_alloc_region);
6000       _hr_printer.alloc(new_alloc_region, young_list_full);
6001       check_bitmaps("Mutator Region Allocation", new_alloc_region);
6002       return new_alloc_region;
6003     }
6004   }
6005   return NULL;
6006 }
6007 
6008 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6009                                                   size_t allocated_bytes) {
6010   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6011   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6012 
6013   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6014   increase_used(allocated_bytes);
6015   _hr_printer.retire(alloc_region);
6016   // We update the eden sizes here, when the region is retired,
6017   // instead of when it's allocated, since this is the point that its
6018   // used space has been recorded in _summary_bytes_used.
6019   g1mm()->update_eden_size();
6020 }
6021 
6022 // Methods for the GC alloc regions
6023 
6024 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6025                                                  uint count,
6026                                                  InCSetState dest) {
6027   assert(FreeList_lock->owned_by_self(), "pre-condition");
6028 
6029   if (count < g1_policy()->max_regions(dest)) {
6030     const bool is_survivor = (dest.is_young());
6031     HeapRegion* new_alloc_region = new_region(word_size,
6032                                               !is_survivor,
6033                                               true /* do_expand */);
6034     if (new_alloc_region != NULL) {
6035       // We really only need to do this for old regions given that we
6036       // should never scan survivors. But it doesn't hurt to do it
6037       // for survivors too.
6038       new_alloc_region->record_timestamp();
6039       if (is_survivor) {
6040         new_alloc_region->set_survivor();
6041         check_bitmaps("Survivor Region Allocation", new_alloc_region);
6042       } else {
6043         new_alloc_region->set_old();
6044         check_bitmaps("Old Region Allocation", new_alloc_region);
6045       }
6046       _hr_printer.alloc(new_alloc_region);
6047       bool during_im = collector_state()->during_initial_mark_pause();
6048       new_alloc_region->note_start_of_copying(during_im);
6049       return new_alloc_region;
6050     }
6051   }
6052   return NULL;
6053 }
6054 
6055 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6056                                              size_t allocated_bytes,
6057                                              InCSetState dest) {
6058   bool during_im = collector_state()->during_initial_mark_pause();
6059   alloc_region->note_end_of_copying(during_im);
6060   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6061   if (dest.is_young()) {
6062     young_list()->add_survivor_region(alloc_region);
6063   } else {
6064     _old_set.add(alloc_region);
6065   }
6066   _hr_printer.retire(alloc_region);
6067 }
6068 
6069 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
6070   bool expanded = false;
6071   uint index = _hrm.find_highest_free(&expanded);
6072 
6073   if (index != G1_NO_HRM_INDEX) {
6074     if (expanded) {
6075       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
6076                                 HeapRegion::GrainWords * HeapWordSize);
6077     }
6078     _hrm.allocate_free_regions_starting_at(index, 1);
6079     return region_at(index);
6080   }
6081   return NULL;
6082 }
6083 
6084 // Heap region set verification
6085 
6086 class VerifyRegionListsClosure : public HeapRegionClosure {
6087 private:
6088   HeapRegionSet*   _old_set;
6089   HeapRegionSet*   _humongous_set;
6090   HeapRegionManager*   _hrm;
6091 
6092 public:
6093   uint _old_count;
6094   uint _humongous_count;
6095   uint _free_count;
6096 
6097   VerifyRegionListsClosure(HeapRegionSet* old_set,
6098                            HeapRegionSet* humongous_set,
6099                            HeapRegionManager* hrm) :
6100     _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6101     _old_count(), _humongous_count(), _free_count(){ }
6102 
6103   bool doHeapRegion(HeapRegion* hr) {
6104     if (hr->is_young()) {
6105       // TODO
6106     } else if (hr->is_humongous()) {
6107       assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
6108       _humongous_count++;
6109     } else if (hr->is_empty()) {
6110       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
6111       _free_count++;
6112     } else if (hr->is_old()) {
6113       assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
6114       _old_count++;
6115     } else {
6116       // There are no other valid region types. Check for one invalid
6117       // one we can identify: pinned without old or humongous set.
6118       assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
6119       ShouldNotReachHere();
6120     }
6121     return false;
6122   }
6123 
6124   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6125     guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
6126     guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
6127     guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
6128   }
6129 };
6130 
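VerifyRegionListsClosure is the standard shape for heap-wide checks in this file: subclass HeapRegionClosure, do the per-region work in doHeapRegion() (returning false to keep iterating), and drive it with heap_region_iterate(). A minimal sketch in that shape, using only the predicates visible above; the closure name and the counting it performs are illustrative only, not part of this change.

// Illustrative only: counts humongous regions the same way the
// verification closure above counts per-type regions.
class CountHumongousClosure : public HeapRegionClosure {
  uint _count;
public:
  CountHumongousClosure() : _count(0) { }
  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      _count++;
    }
    return false; // false == keep iterating over the remaining regions
  }
  uint count() const { return _count; }
};

// Typical use, mirroring verify_region_sets() below:
//   CountHumongousClosure cl;
//   heap_region_iterate(&cl);
//   // cl.count() now holds the number of humongous regions.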
6131 void G1CollectedHeap::verify_region_sets() {
6132   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6133 
6134   // First, check the explicit lists.
6135   _hrm.verify();
6136   {
6137     // Given that a concurrent operation might be adding regions to
6138     // the secondary free list we have to take the lock before
6139     // verifying it.
6140     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6141     _secondary_free_list.verify_list();
6142   }
6143 
6144   // If a concurrent region freeing operation is in progress it will
6145   // be difficult to correctly attribute any free regions we come
6146   // across to the correct free list given that they might belong to
6147   // one of several (free_list, secondary_free_list, any local lists,
6148   // etc.). So, if that's the case we will skip the rest of the
6149   // verification operation. Alternatively, waiting for the concurrent
6150   // operation to complete will have a non-trivial effect on the GC's
6151   // operation (no concurrent operation will last longer than the
6152   // interval between two calls to verification) and it might hide
6153   // any issues that we would like to catch during testing.
6154   if (free_regions_coming()) {
6155     return;
6156   }
6157 
6158   // Make sure we append the secondary_free_list on the free_list so
6159   // that all free regions we will come across can be safely
6160   // attributed to the free_list.
6161   append_secondary_free_list_if_not_empty_with_lock();
6162 
6163   // Finally, make sure that the region accounting in the lists is
6164   // consistent with what we see in the heap.
6165 
6166   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6167   heap_region_iterate(&cl);
6168   cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6169 }
6170 
6171 // Optimized nmethod scanning
6172 
6173 class RegisterNMethodOopClosure: public OopClosure {
6174   G1CollectedHeap* _g1h;
6175   nmethod* _nm;
6176 
6177   template <class T> void do_oop_work(T* p) {
6178     T heap_oop = oopDesc::load_heap_oop(p);
6179     if (!oopDesc::is_null(heap_oop)) {
6180       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6181       HeapRegion* hr = _g1h->heap_region_containing(obj);
6182       assert(!hr->is_continues_humongous(),
6183              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
6184              " starting at " HR_FORMAT,
6185              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
6186 
6187       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6188       hr->add_strong_code_root_locked(_nm);




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentG1Refine.hpp"
  33 #include "gc/g1/concurrentG1RefineThread.hpp"
  34 #include "gc/g1/concurrentMarkThread.inline.hpp"
  35 #include "gc/g1/g1Allocator.inline.hpp"
  36 #include "gc/g1/g1CollectedHeap.inline.hpp"
  37 #include "gc/g1/g1CollectorPolicy.hpp"
  38 #include "gc/g1/g1CollectorState.hpp"
  39 #include "gc/g1/g1EvacStats.inline.hpp"
  40 #include "gc/g1/g1GCPhaseTimes.hpp"
  41 #include "gc/g1/g1HeapVerifier.hpp"
  42 #include "gc/g1/g1MarkSweep.hpp"
  43 #include "gc/g1/g1OopClosures.inline.hpp"
  44 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  45 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  46 #include "gc/g1/g1RemSet.inline.hpp"
  47 #include "gc/g1/g1RootClosures.hpp"
  48 #include "gc/g1/g1RootProcessor.hpp"
  49 #include "gc/g1/g1StringDedup.hpp"
  50 #include "gc/g1/g1YCTypes.hpp"
  51 #include "gc/g1/heapRegion.inline.hpp"
  52 #include "gc/g1/heapRegionRemSet.hpp"
  53 #include "gc/g1/heapRegionSet.inline.hpp"
  54 #include "gc/g1/suspendibleThreadSet.hpp"
  55 #include "gc/g1/vm_operations_g1.hpp"
  56 #include "gc/shared/gcHeapSummary.hpp"
  57 #include "gc/shared/gcId.hpp"
  58 #include "gc/shared/gcLocker.inline.hpp"
  59 #include "gc/shared/gcTimer.hpp"
  60 #include "gc/shared/gcTrace.hpp"
  61 #include "gc/shared/gcTraceTime.inline.hpp"


 383   // no thread sees the update to top before the zeroing of the
 384   // object header and the BOT initialization.
 385   OrderAccess::storestore();
 386 
 387   // Now, we will update the top fields of the "continues humongous"
 388   // regions except the last one.
 389   for (uint i = first; i < last; ++i) {
 390     hr = region_at(i);
 391     hr->set_top(hr->end());
 392   }
 393 
 394   hr = region_at(last);
 395   // If we cannot fit a filler object, we must set top to the end
 396   // of the humongous object, otherwise we cannot iterate the heap
 397   // and the BOT will not be complete.
 398   hr->set_top(hr->end() - words_not_fillable);
 399 
 400   assert(hr->bottom() < obj_top && obj_top <= hr->end(),
 401          "obj_top should be in last region");
 402 
 403   _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
 404 
 405   assert(words_not_fillable == 0 ||
 406          first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
 407          "Miscalculation in humongous allocation");
 408 
 409   increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
 410 
 411   for (uint i = first; i <= last; ++i) {
 412     hr = region_at(i);
 413     _humongous_set.add(hr);
 414     _hr_printer.alloc(hr);
 415   }
 416 
 417   return new_obj;
 418 }
 419 
 420 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
 421   assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
 422   return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
 423 }
 424 
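humongous_obj_size_in_regions() is just a round-up division by the region size in words. A standalone worked example follows, assuming 64-bit heap words and a 1 MB region size; the actual region size is ergonomically chosen and configurable, so these numbers are illustrative only.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t HeapWordSize = 8;                          // assumed 64-bit heap words
  const size_t GrainBytes   = 1024 * 1024;                // assumed 1 MB regions
  const size_t GrainWords   = GrainBytes / HeapWordSize;  // 131072 words per region

  // Same shape as align_size_up_(word_size, GrainWords) / GrainWords,
  // i.e. a round-up division.
  size_t word_size = 200000;                              // a hypothetical humongous object
  size_t regions   = (word_size + GrainWords - 1) / GrainWords;
  std::printf("%zu words need %zu region(s)\n", word_size, regions); // prints 2
  return 0;
}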
 425 // If could fit into free regions w/o expansion, try.
 426 // Otherwise, if can expand, do so.
 427 // Otherwise, if using ex regions might help, try with ex given back.
 428 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 429   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 430 
 431   _verifier->verify_region_sets_optional();
 432 
 433   uint first = G1_NO_HRM_INDEX;
 434   uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
 435 
 436   if (obj_regions == 1) {
 437     // Only one region to allocate, try to use a fast path by directly allocating
 438     // from the free lists. Do not try to expand here, we will potentially do that
 439     // later.
 440     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 441     if (hr != NULL) {
 442       first = hr->hrm_index();
 443     }
 444   } else {
 445     // We can't allocate humongous regions spanning more than one region while
 446     // cleanupComplete() is running, since some of the regions we find to be
 447     // empty might not yet be added to the free list. It is not straightforward
 448   // to know which list they are on so that we can remove them. We only
 449     // need to do this if we need to allocate more than one region to satisfy the
 450     // current humongous allocation request. If we are only allocating one region
 451   // we use the one-region allocation code (see above), which already


 485       }
 486 #endif
 487       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 488     } else {
 489       // Policy: Potentially trigger a defragmentation GC.
 490     }
 491   }
 492 
 493   HeapWord* result = NULL;
 494   if (first != G1_NO_HRM_INDEX) {
 495     result = humongous_obj_allocate_initialize_regions(first, obj_regions,
 496                                                        word_size, context);
 497     assert(result != NULL, "it should always return a valid result");
 498 
 499     // A successful humongous object allocation changes the used space
 500     // information of the old generation so we need to recalculate the
 501     // sizes and update the jstat counters here.
 502     g1mm()->update_sizes();
 503   }
 504 
 505   _verifier->verify_region_sets_optional();
 506 
 507   return result;
 508 }
 509 
 510 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 511   assert_heap_not_locked_and_not_at_safepoint();
 512   assert(!is_humongous(word_size), "we do not allow humongous TLABs");
 513 
 514   uint dummy_gc_count_before;
 515   uint dummy_gclocker_retry_count = 0;
 516   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 517 }
 518 
 519 HeapWord*
 520 G1CollectedHeap::mem_allocate(size_t word_size,
 521                               bool*  gc_overhead_limit_was_exceeded) {
 522   assert_heap_not_locked_and_not_at_safepoint();
 523 
 524   // Loop until the allocation is satisfied, or unsatisfied after GC.
 525   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {


1214 
1215   if (GCLocker::check_active_before_gc()) {
1216     return false;
1217   }
1218 
1219   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1220   gc_timer->register_gc_start();
1221 
1222   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1223   GCIdMark gc_id_mark;
1224   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1225 
1226   SvcGCMarker sgcm(SvcGCMarker::FULL);
1227   ResourceMark rm;
1228 
1229   print_heap_before_gc();
1230   trace_heap_before_gc(gc_tracer);
1231 
1232   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1233 
1234   _verifier->verify_region_sets_optional();
1235 
1236   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1237                            collector_policy()->should_clear_all_soft_refs();
1238 
1239   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1240 
1241   {
1242     IsGCActiveMark x;
1243 
1244     // Timing
1245     assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1246     GCTraceCPUTime tcpu;
1247 
1248     {
1249       GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1250       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1251       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1252 
1253       g1_policy()->record_full_collection_start();
1254 
1255       // Note: When we have a more flexible GC logging framework that
1256       // allows us to add optional attributes to a GC log record we
1257       // could consider timing and reporting how long we wait in the
1258       // following two methods.
1259       wait_while_free_regions_coming();
1260       // If we start the compaction before the CM threads finish
1261       // scanning the root regions we might trip them over as we'll
1262       // be moving objects / updating references. So let's wait until
1263       // they are done. By telling them to abort, they should complete
1264       // early.
1265       _cm->root_regions()->abort();
1266       _cm->root_regions()->wait_until_scan_finished();
1267       append_secondary_free_list_if_not_empty_with_lock();
1268 
1269       gc_prologue(true);
1270       increment_total_collections(true /* full gc */);
1271       increment_old_marking_cycles_started();
1272 
1273       assert(used() == recalculate_used(), "Should be equal");
1274 
1275       _verifier->verify_before_gc();
1276 
1277       _verifier->check_bitmaps("Full GC Start");
1278       pre_full_gc_dump(gc_timer);
1279 
1280 #if defined(COMPILER2) || INCLUDE_JVMCI
1281       DerivedPointerTable::clear();
1282 #endif
1283 
1284       // Disable discovery and empty the discovered lists
1285       // for the CM ref processor.
1286       ref_processor_cm()->disable_discovery();
1287       ref_processor_cm()->abandon_partial_discovery();
1288       ref_processor_cm()->verify_no_references_recorded();
1289 
1290       // Abandon current iterations of concurrent marking and concurrent
1291       // refinement, if any are in progress. We have to do this before
1292       // wait_until_scan_finished() below.
1293       concurrent_mark()->abort();
1294 
1295       // Make sure we'll choose a new allocation region afterwards.
1296       _allocator->release_mutator_alloc_region();
1297       _allocator->abandon_gc_alloc_regions();


1392       }
1393 
1394 #ifdef TRACESPINNING
1395       ParallelTaskTerminator::print_termination_counts();
1396 #endif
1397 
1398       // Discard all rset updates
1399       JavaThread::dirty_card_queue_set().abandon_logs();
1400       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1401 
1402       _young_list->reset_sampled_info();
1403       // At this point there should be no regions in the
1404       // entire heap tagged as young.
1405       assert(check_young_list_empty(true /* check_heap */),
1406              "young list should be empty at this point");
1407 
1408       // Update the number of full collections that have been completed.
1409       increment_old_marking_cycles_completed(false /* concurrent */);
1410 
1411       _hrm.verify_optional();
1412       _verifier->verify_region_sets_optional();
1413 
1414       _verifier->verify_after_gc();
1415 
1416       // Clear the previous marking bitmap, if needed for bitmap verification.
1417       // Note we cannot do this when we clear the next marking bitmap in
1418       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1419       // objects marked during a full GC against the previous bitmap.
1420       // But we need to clear it before calling check_bitmaps below since
1421       // the full GC has compacted objects and updated TAMS but not updated
1422       // the prev bitmap.
1423       if (G1VerifyBitmaps) {
1424         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1425       }
1426       _verifier->check_bitmaps("Full GC End");
1427 
1428       // Start a new incremental collection set for the next pause
1429       assert(g1_policy()->collection_set() == NULL, "must be");
1430       g1_policy()->start_incremental_cset_building();
1431 
1432       clear_cset_fast_test();
1433 
1434       _allocator->init_mutator_alloc_region();
1435 
1436       g1_policy()->record_full_collection_end();
1437 
1438       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1439       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1440       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1441       // before any GC notifications are raised.
1442       g1mm()->update_sizes();
1443 
1444       gc_epilogue(true);
1445     }
1446 


1623 
1624   assert(!collector_policy()->should_clear_all_soft_refs(),
1625          "Flag should have been handled and cleared prior to this point");
1626 
1627   // What else?  We might try synchronous finalization later.  If the total
1628   // space available is large enough for the allocation, then a more
1629   // complete compaction phase than we've tried so far might be
1630   // appropriate.
1631   assert(*succeeded, "sanity");
1632   return NULL;
1633 }
1634 
1635 // Attempt to expand the heap sufficiently
1636 // to support an allocation of the given "word_size".  If
1637 // successful, perform the allocation and return the address of the
1638 // allocated block, or else "NULL".
1639 
1640 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1641   assert_at_safepoint(true /* should_be_vm_thread */);
1642 
1643   _verifier->verify_region_sets_optional();
1644 
1645   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1646   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1647                             word_size * HeapWordSize);
1648 
1649 
1650   if (expand(expand_bytes)) {
1651     _hrm.verify_optional();
1652     _verifier->verify_region_sets_optional();
1653     return attempt_allocation_at_safepoint(word_size,
1654                                            context,
1655                                            false /* expect_null_mutator_alloc_region */);
1656   }
1657   return NULL;
1658 }
1659 
1660 bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) {
1661   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1662   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1663                                        HeapRegion::GrainBytes);
1664 
1665   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount:" SIZE_FORMAT "B expansion amount:" SIZE_FORMAT "B",
1666                             expand_bytes, aligned_expand_bytes);
1667 
1668   if (is_maximal_no_gc()) {
1669     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1670     return false;
1671   }
1672 
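expand() first rounds the request up to a whole number of OS pages and then up to a whole number of regions, since the heap can only grow region by region. Below is a standalone sketch of that double round-up; the 4 KB page size and 1 MB region size are assumptions, as both are platform and ergonomics dependent.

#include <cstddef>
#include <cstdio>

// Round x up to a multiple of alignment (alignment must be a power of two),
// matching the shape of page_align_size_up() / align_size_up().
static size_t align_up(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page_size  = 4 * 1024;        // assumed OS page size
  const size_t GrainBytes = 1024 * 1024;     // assumed region size

  size_t expand_bytes = 300 * 1024;          // hypothetical 300 KB request
  size_t aligned      = align_up(expand_bytes, page_size);   // 307200: already page aligned
  aligned             = align_up(aligned, GrainBytes);       // 1048576: one full region
  std::printf("request %zu B -> expand by %zu B\n", expand_bytes, aligned);
  return 0;
}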


1701   size_t aligned_shrink_bytes =
1702     ReservedSpace::page_align_size_down(shrink_bytes);
1703   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1704                                          HeapRegion::GrainBytes);
1705   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1706 
1707   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1708   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1709 
1710 
1711   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1712                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1713   if (num_regions_removed > 0) {
1714     g1_policy()->record_new_heap_size(num_regions());
1715   } else {
1716     log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1717   }
1718 }
1719 
1720 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1721   _verifier->verify_region_sets_optional();
1722 
1723   // We should only reach here at the end of a Full GC which means we
1724   // should not be holding on to any GC alloc regions. The method
1725   // below will make sure of that and do any remaining clean up.
1726   _allocator->abandon_gc_alloc_regions();
1727 
1728   // Instead of tearing down / rebuilding the free lists here, we
1729   // could use the remove_all_pending() method on free_list to
1730   // remove only the ones that we need to remove.
1731   tear_down_region_sets(true /* free_list_only */);
1732   shrink_helper(shrink_bytes);
1733   rebuild_region_sets(true /* free_list_only */);
1734 
1735   _hrm.verify_optional();
1736   _verifier->verify_region_sets_optional();
1737 }
1738 
1739 // Public methods.
1740 
1741 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1742   CollectedHeap(),
1743   _g1_policy(policy_),
1744   _dirty_card_queue_set(false),
1745   _is_alive_closure_cm(this),
1746   _is_alive_closure_stw(this),
1747   _ref_processor_cm(NULL),
1748   _ref_processor_stw(NULL),
1749   _bot(NULL),
1750   _cg1r(NULL),
1751   _g1mm(NULL),
1752   _refine_cte_cl(NULL),
1753   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1754   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1755   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1756   _humongous_reclaim_candidates(),


1762   _summary_bytes_used(0),
1763   _survivor_evac_stats(YoungPLABSize, PLABWeight),
1764   _old_evac_stats(OldPLABSize, PLABWeight),
1765   _expand_heap_after_alloc_failure(true),
1766   _old_marking_cycles_started(0),
1767   _old_marking_cycles_completed(0),
1768   _heap_summary_sent(false),
1769   _in_cset_fast_test(),
1770   _dirty_cards_region_list(NULL),
1771   _worker_cset_start_region(NULL),
1772   _worker_cset_start_region_time_stamp(NULL),
1773   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1774   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1775   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1776   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1777 
1778   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1779                           /* are_GC_task_threads */true,
1780                           /* are_ConcurrentGC_threads */false);
1781   _workers->initialize_workers();
1782   _verifier = new G1HeapVerifier(this);
1783 
1784   _allocator = G1Allocator::create_allocator(this);
1785   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1786 
1787   // Override the default _filler_array_max_size so that no humongous filler
1788   // objects are created.
1789   _filler_array_max_size = _humongous_object_threshold_in_words;
1790 
1791   uint n_queues = ParallelGCThreads;
1792   _task_queues = new RefToScanQueueSet(n_queues);
1793 
1794   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1795   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1796   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1797 
1798   for (uint i = 0; i < n_queues; i++) {
1799     RefToScanQueue* q = new RefToScanQueue();
1800     q->initialize();
1801     _task_queues->register_queue(i, q);
1802     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();


2652 // must be equal to the humongous object limit.
2653 size_t G1CollectedHeap::max_tlab_size() const {
2654   return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
2655 }
2656 
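A TLAB is allocated like any other object, so its cap is pinned to the humongous threshold, which in G1 is half the region size. A standalone worked example follows; the 1 MB region size, 8-byte heap words, and a MinObjAlignment of one heap word are all assumptions made for illustration.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t HeapWordSize    = 8;                        // assumed 64-bit heap words
  const size_t GrainBytes      = 1024 * 1024;              // assumed 1 MB regions
  const size_t GrainWords      = GrainBytes / HeapWordSize;
  const size_t MinObjAlignment = 1;                        // assumed: one heap word

  // Humongous threshold: half a region, in words.
  size_t humongous_threshold_in_words = GrainWords / 2;    // 65536 words = 512 KB

  // align_size_down(threshold, MinObjAlignment); a no-op with 1-word alignment.
  size_t max_tlab_words = humongous_threshold_in_words & ~(MinObjAlignment - 1);
  std::printf("max TLAB: %zu words (%zu bytes)\n",
              max_tlab_words, max_tlab_words * HeapWordSize);
  return 0;
}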
2657 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2658   AllocationContext_t context = AllocationContext::current();
2659   return _allocator->unsafe_max_tlab_alloc(context);
2660 }
2661 
2662 size_t G1CollectedHeap::max_capacity() const {
2663   return _hrm.reserved().byte_size();
2664 }
2665 
2666 jlong G1CollectedHeap::millis_since_last_gc() {
2667   // assert(false, "NYI");
2668   return 0;
2669 }
2670 
2671 void G1CollectedHeap::prepare_for_verify() {
2672   _verifier->prepare_for_verify();
2673 }
2674 
2675 class PrintRegionClosure: public HeapRegionClosure {
2676   outputStream* _st;
2677 public:
2678   PrintRegionClosure(outputStream* st) : _st(st) {}
2679   bool doHeapRegion(HeapRegion* r) {
2680     r->print_on(_st);
2681     return false;
2682   }
2683 };
2684 
2685 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2686                                        const HeapRegion* hr,
2687                                        const VerifyOption vo) const {
2688   switch (vo) {
2689   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2690   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2691   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
2692   default:                            ShouldNotReachHere();


3197   assert_at_safepoint(true /* should_be_vm_thread */);
3198   guarantee(!is_gc_active(), "collection is not reentrant");
3199 
3200   if (GCLocker::check_active_before_gc()) {
3201     return false;
3202   }
3203 
3204   _gc_timer_stw->register_gc_start();
3205 
3206   GCIdMark gc_id_mark;
3207   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3208 
3209   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3210   ResourceMark rm;
3211 
3212   wait_for_root_region_scanning();
3213 
3214   print_heap_before_gc();
3215   trace_heap_before_gc(_gc_tracer_stw);
3216 
3217   _verifier->verify_region_sets_optional();
3218   _verifier->verify_dirty_young_regions();
3219 
3220   // This call will decide whether this pause is an initial-mark
3221   // pause. If it is, during_initial_mark_pause() will return true
3222   // for the duration of this pause.
3223   g1_policy()->decide_on_conc_mark_initiation();
3224 
3225   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3226   assert(!collector_state()->during_initial_mark_pause() ||
3227           collector_state()->gcs_are_young(), "sanity");
3228 
3229   // We also do not allow mixed GCs during marking.
3230   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3231 
3232   // Record whether this pause is an initial mark. When the current
3233   // thread has completed its logging output and it's safe to signal
3234   // the CM thread, the flag's value in the policy has been reset.
3235   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3236 
3237   // Inner scope for scope based logging, timers, and stats collection
3238   {


3281 
3282     assert(check_young_list_well_formed(), "young list should be well formed");
3283 
3284     // Don't dynamically change the number of GC threads this early.  A value of
3285     // 0 is used to indicate serial work.  When parallel work is done,
3286     // it will be set.
3287 
3288     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3289       IsGCActiveMark x;
3290 
3291       gc_prologue(false);
3292       increment_total_collections(false /* full gc */);
3293       increment_gc_time_stamp();
3294 
3295       if (VerifyRememberedSets) {
3296         log_info(gc, verify)("[Verifying RemSets before GC]");
3297         VerifyRegionRemSetClosure v_cl;
3298         heap_region_iterate(&v_cl);
3299       }
3300 
3301       _verifier->verify_before_gc();
3302 
3303       _verifier->check_bitmaps("GC Start");
3304 
3305 #if defined(COMPILER2) || INCLUDE_JVMCI
3306       DerivedPointerTable::clear();
3307 #endif
3308 
3309       // Please see comment in g1CollectedHeap.hpp and
3310       // G1CollectedHeap::ref_processing_init() to see how
3311       // reference processing currently works in G1.
3312 
3313       // Enable discovery in the STW reference processor
3314       if (g1_policy()->should_process_references()) {
3315         ref_processor_stw()->enable_discovery();
3316       } else {
3317         ref_processor_stw()->disable_discovery();
3318       }
3319 
3320       {
3321         // We want to temporarily turn off discovery by the
3322         // CM ref processor, if necessary, and turn it back on
3323         // again later if we do. Using a scoped


3341         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3342 
3343         if (collector_state()->during_initial_mark_pause()) {
3344           concurrent_mark()->checkpointRootsInitialPre();
3345         }
3346 
3347         double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
3348         g1_policy()->finalize_old_cset_part(time_remaining_ms);
3349 
3350         evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
3351 
3352         // Make sure the remembered sets are up to date. This needs to be
3353         // done before register_humongous_regions_with_cset(), because the
3354         // remembered sets are used there to choose eager reclaim candidates.
3355         // If the remembered sets are not up to date we might miss some
3356         // entries that need to be handled.
3357         g1_rem_set()->cleanupHRRS();
3358 
3359         register_humongous_regions_with_cset();
3360 
3361         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3362 
3363         _cm->note_start_of_gc();
3364         // We call this after finalize_cset() to
3365         // ensure that the CSet has been finalized.
3366         _cm->verify_no_cset_oops();
3367 
3368         if (_hr_printer.is_active()) {
3369           HeapRegion* hr = g1_policy()->collection_set();
3370           while (hr != NULL) {
3371             _hr_printer.cset(hr);
3372             hr = hr->next_in_collection_set();
3373           }
3374         }
3375 
3376 #ifdef ASSERT
3377         VerifyCSetClosure cl;
3378         collection_set_iterate(&cl);
3379 #endif // ASSERT
3380 
3381         // Initialize the GC alloc regions.


3491         // during the last GC). But it shouldn't. Given that
3492         // saved_mark_word() is conditional on whether the GC time stamp
3493         // on the region is current or not, by incrementing the GC time
3494         // stamp here we invalidate all the GC time stamps on all the
3495         // regions and saved_mark_word() will simply return top() for
3496         // all the regions. This is a nicer way of ensuring this rather
3497         // than iterating over the regions and fixing them. In fact, the
3498         // GC time stamp increment here also ensures that
3499         // saved_mark_word() will return top() between pauses, i.e.,
3500         // during concurrent refinement. So we don't need the
3501         // is_gc_active() check to decide which top to use when
3502         // scanning cards (see CR 7039627).
3503         increment_gc_time_stamp();
3504 
3505         if (VerifyRememberedSets) {
3506           log_info(gc, verify)("[Verifying RemSets after GC]");
3507           VerifyRegionRemSetClosure v_cl;
3508           heap_region_iterate(&v_cl);
3509         }
3510 
3511         _verifier->verify_after_gc();
3512         _verifier->check_bitmaps("GC End");
3513 
3514         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3515         ref_processor_stw()->verify_no_references_recorded();
3516 
3517         // CM reference discovery will be re-enabled if necessary.
3518       }
3519 
3520 #ifdef TRACESPINNING
3521       ParallelTaskTerminator::print_termination_counts();
3522 #endif
3523 
3524       gc_epilogue(false);
3525     }
3526 
3527     // Print the remainder of the GC log output.
3528     log_gc_footer(os::elapsed_counter() - pause_start_counter);
3529 
3530     // It is not yet safe to tell the concurrent mark to
3531     // start as we have some optional output below. We don't want the
3532     // output from the concurrent mark thread interfering with this
3533     // logging output either.
3534 
3535     _hrm.verify_optional();
3536     _verifier->verify_region_sets_optional();
3537 
3538     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3539     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3540 
3541     print_heap_after_gc();
3542     trace_heap_after_gc(_gc_tracer_stw);
3543 
3544     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3545     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3546     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3547     // before any GC notifications are raised.
3548     g1mm()->update_sizes();
3549 
3550     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3551     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3552     _gc_timer_stw->register_gc_end();
3553     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3554   }
3555   // It should now be safe to tell the concurrent mark thread to start
3556   // without its logging output interfering with the logging output


4785   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
4786                      G1CollectedHeap* g1h) :
4787     AbstractGangTask("G1 Par Cleanup CT Task"),
4788     _ct_bs(ct_bs), _g1h(g1h) { }
4789 
4790   void work(uint worker_id) {
4791     HeapRegion* r;
4792     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
4793       clear_cards(r);
4794     }
4795   }
4796 
4797   void clear_cards(HeapRegion* r) {
4798     // Cards of the survivors should have already been dirtied.
4799     if (!r->is_survivor()) {
4800       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
4801     }
4802   }
4803 };
4804 
4805 class G1ParScrubRemSetTask: public AbstractGangTask {
4806 protected:
4807   G1RemSet* _g1rs;
4808   BitMap* _region_bm;
4809   BitMap* _card_bm;
4810   HeapRegionClaimer _hrclaimer;
4811 
4812 public:
4813   G1ParScrubRemSetTask(G1RemSet* g1_rs, BitMap* region_bm, BitMap* card_bm, uint num_workers) :
4814     AbstractGangTask("G1 ScrubRS"),
4815     _g1rs(g1_rs),
4816     _region_bm(region_bm),
4817     _card_bm(card_bm),
4818     _hrclaimer(num_workers) {
4819   }
4820 
4821   void work(uint worker_id) {
4822     _g1rs->scrub(_region_bm, _card_bm, worker_id, &_hrclaimer);
4823   }
4824 };
4825 
4826 void G1CollectedHeap::scrub_rem_set(BitMap* region_bm, BitMap* card_bm) {
4827   uint num_workers = workers()->active_workers();
4828   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), region_bm, card_bm, num_workers);
4829   workers()->run_task(&g1_par_scrub_rs_task);
4830 }
4831 
4832 void G1CollectedHeap::cleanUpCardTable() {
4833   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
4834   double start = os::elapsedTime();
4835 
4836   {
4837     // Iterate over the dirty cards region list.
4838     G1ParCleanupCTTask cleanup_task(ct_bs, this);
4839 
4840     workers()->run_task(&cleanup_task);
4841 #ifndef PRODUCT
4842     _verifier->verify_card_table_cleanup();
4843 #endif
4844   }
4845 
4846   double elapsed = os::elapsedTime() - start;
4847   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
4848 }
4849 
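cleanUpCardTable() times the whole parallel cleanup with os::elapsedTime(), which reports seconds, and hands the result to the phase-times bookkeeping in milliseconds, hence the * 1000.0. A standalone sketch of the same measure-in-seconds, record-in-milliseconds pattern using std::chrono; the timed work here is a placeholder.

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  using clock = std::chrono::steady_clock;

  clock::time_point start = clock::now();
  // Placeholder for the timed work (here, the card-table cleanup pass).
  std::this_thread::sleep_for(std::chrono::milliseconds(25));
  double elapsed_sec = std::chrono::duration<double>(clock::now() - start).count();

  // Record in milliseconds, as record_clear_ct_time() expects.
  double elapsed_ms = elapsed_sec * 1000.0;
  std::printf("clear-card-table time: %.3f ms\n", elapsed_ms);
  return 0;
}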
4850 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4851   size_t pre_used = 0;
4852   FreeRegionList local_free_list("Local List for CSet Freeing");
4853 
4854   double young_time_ms     = 0.0;
4855   double non_young_time_ms = 0.0;
4856 
4857   // Since the collection set is a superset of the young list,
4858   // all we need to do to clear the young list is clear its
4859   // head and length, and unlink any young regions in the code below.
4860   _young_list->clear();
4861 
4862   G1CollectorPolicy* policy = g1_policy();


5344 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5345   HeapRegion* hr = heap_region_containing(p);
5346   return hr->is_in(p);
5347 }
5348 
5349 // Methods for the mutator alloc region
5350 
5351 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5352                                                       bool force) {
5353   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5354   assert(!force || g1_policy()->can_expand_young_list(),
5355          "if force is true we should be able to expand the young list");
5356   bool young_list_full = g1_policy()->is_young_list_full();
5357   if (force || !young_list_full) {
5358     HeapRegion* new_alloc_region = new_region(word_size,
5359                                               false /* is_old */,
5360                                               false /* do_expand */);
5361     if (new_alloc_region != NULL) {
5362       set_region_short_lived_locked(new_alloc_region);
5363       _hr_printer.alloc(new_alloc_region, young_list_full);
5364       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
5365       return new_alloc_region;
5366     }
5367   }
5368   return NULL;
5369 }
5370 
5371 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
5372                                                   size_t allocated_bytes) {
5373   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5374   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
5375 
5376   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
5377   increase_used(allocated_bytes);
5378   _hr_printer.retire(alloc_region);
5379   // We update the eden sizes here, when the region is retired,
5380   // instead of when it's allocated, since this is the point that its
5381   // used space has been recorded in _summary_bytes_used.
5382   g1mm()->update_eden_size();
5383 }
5384 
5385 // Methods for the GC alloc regions
5386 
5387 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
5388                                                  uint count,
5389                                                  InCSetState dest) {
5390   assert(FreeList_lock->owned_by_self(), "pre-condition");
5391 
5392   if (count < g1_policy()->max_regions(dest)) {
5393     const bool is_survivor = (dest.is_young());
5394     HeapRegion* new_alloc_region = new_region(word_size,
5395                                               !is_survivor,
5396                                               true /* do_expand */);
5397     if (new_alloc_region != NULL) {
5398       // We really only need to do this for old regions given that we
5399       // should never scan survivors. But it doesn't hurt to do it
5400       // for survivors too.
5401       new_alloc_region->record_timestamp();
5402       if (is_survivor) {
5403         new_alloc_region->set_survivor();
5404         _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
5405       } else {
5406         new_alloc_region->set_old();
5407         _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
5408       }
5409       _hr_printer.alloc(new_alloc_region);
5410       bool during_im = collector_state()->during_initial_mark_pause();
5411       new_alloc_region->note_start_of_copying(during_im);
5412       return new_alloc_region;
5413     }
5414   }
5415   return NULL;
5416 }
5417 
5418 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
5419                                              size_t allocated_bytes,
5420                                              InCSetState dest) {
5421   bool during_im = collector_state()->during_initial_mark_pause();
5422   alloc_region->note_end_of_copying(during_im);
5423   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
5424   if (dest.is_young()) {
5425     young_list()->add_survivor_region(alloc_region);
5426   } else {
5427     _old_set.add(alloc_region);
5428   }
5429   _hr_printer.retire(alloc_region);
5430 }
5431 
5432 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
5433   bool expanded = false;
5434   uint index = _hrm.find_highest_free(&expanded);
5435 
5436   if (index != G1_NO_HRM_INDEX) {
5437     if (expanded) {
5438       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
5439                                 HeapRegion::GrainWords * HeapWordSize);
5440     }
5441     _hrm.allocate_free_regions_starting_at(index, 1);
5442     return region_at(index);
5443   }
5444   return NULL;
5445 }
5446 
5447 // Optimized nmethod scanning
5448 
5449 class RegisterNMethodOopClosure: public OopClosure {
5450   G1CollectedHeap* _g1h;
5451   nmethod* _nm;
5452 
5453   template <class T> void do_oop_work(T* p) {
5454     T heap_oop = oopDesc::load_heap_oop(p);
5455     if (!oopDesc::is_null(heap_oop)) {
5456       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5457       HeapRegion* hr = _g1h->heap_region_containing(obj);
5458       assert(!hr->is_continues_humongous(),
5459              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5460              " starting at " HR_FORMAT,
5461              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5462 
5463       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
5464       hr->add_strong_code_root_locked(_nm);


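RegisterNMethodOopClosure above uses the usual OopClosure idiom: a single do_oop_work() template handles both compressed (narrowOop*) and uncompressed (oop*) references by loading, null-checking, and decoding through the oopDesc helpers. A minimal sketch of that idiom follows, assuming the standard OopClosure do_oop overloads; the closure name and its counting body are illustrative only, not part of this change.

// Illustrative only: counts the non-NULL oops visited, using the same
// load / null-check / decode sequence as RegisterNMethodOopClosure.
class CountNonNullOopClosure : public OopClosure {
  size_t _count;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      (void) obj;   // a real closure would act on obj here
      _count++;
    }
  }

public:
  CountNonNullOopClosure() : _count(0) { }
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  size_t count() const { return _count; }
};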