src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6540 : 8054819: Rename HeapRegionSeq to HeapRegionManager
Reviewed-by: jwilhelm, jmasa


 511 
 512 G1CollectedHeap* G1CollectedHeap::_g1h;
 513 
 514 // Private methods.
 515 
 516 HeapRegion*
 517 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 518   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 519   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 520     if (!_secondary_free_list.is_empty()) {
 521       if (G1ConcRegionFreeingVerbose) {
 522         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 523                                "secondary_free_list has %u entries",
 524                                _secondary_free_list.length());
 525       }
 526       // It looks as if there are free regions available on the
 527       // secondary_free_list. Let's move them to the free_list and try
 528       // again to allocate from it.
 529       append_secondary_free_list();
 530 
 531       assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not "
 532              "empty we should have moved at least one entry to the free_list");
 533       HeapRegion* res = _hrs.allocate_free_region(is_old);
 534       if (G1ConcRegionFreeingVerbose) {
 535         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 536                                "allocated "HR_FORMAT" from secondary_free_list",
 537                                HR_FORMAT_PARAMS(res));
 538       }
 539       return res;
 540     }
 541 
 542     // Wait here until we get notified either when (a) there are no
 543     // more free regions coming or (b) some regions have been moved onto
 544     // the secondary_free_list.
 545     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 546   }
 547 
 548   if (G1ConcRegionFreeingVerbose) {
 549     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 550                            "could not allocate from secondary_free_list");
 551   }
 552   return NULL;
 553 }
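The loop above is a classic monitor wait: the allocating thread parks on SecondaryFreeList_lock until the concurrent cleanup thread either appends regions to the secondary free list or signals that no more regions are coming. For readers less familiar with HotSpot's MutexLockerEx, here is a minimal stand-alone sketch of the same producer/consumer shape using standard C++ primitives; every name below is invented for illustration and none of it is VM code.

#include <condition_variable>
#include <deque>
#include <mutex>

// Stand-alone analogue of the secondary-free-list handshake (illustrative only).
struct RegionSource {
  std::mutex              lock;
  std::condition_variable cv;
  std::deque<int>         secondary_free;      // stand-in for the secondary free list
  bool                    regions_coming = true;

  // Consumer side, mirroring new_region_try_secondary_free_list():
  // returns -1 once the producer is done and nothing is left.
  int take_or_give_up() {
    std::unique_lock<std::mutex> x(lock);
    while (secondary_free.empty() && regions_coming) {
      cv.wait(x);                               // like SecondaryFreeList_lock->wait()
    }
    if (secondary_free.empty()) {
      return -1;                                // "could not allocate from secondary_free_list"
    }
    int region = secondary_free.front();
    secondary_free.pop_front();
    return region;
  }

  // Producer side, mirroring the cleanup thread handing back freed regions.
  void publish(int region) {
    { std::lock_guard<std::mutex> g(lock); secondary_free.push_back(region); }
    cv.notify_all();
  }

  void no_more_regions() {
    { std::lock_guard<std::mutex> g(lock); regions_coming = false; }
    cv.notify_all();
  }
};

The point carried over from the source is that the waiter re-checks both conditions after every wakeup, so a notification never hands out a region that is not actually on the list.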
 554 
 555 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 556   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 557          "the only time we use this to allocate a humongous region is "
 558          "when we are allocating a single humongous region");
 559 
 560   HeapRegion* res;
 561   if (G1StressConcRegionFreeing) {
 562     if (!_secondary_free_list.is_empty()) {
 563       if (G1ConcRegionFreeingVerbose) {
 564         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 565                                "forced to look at the secondary_free_list");
 566       }
 567       res = new_region_try_secondary_free_list(is_old);
 568       if (res != NULL) {
 569         return res;
 570       }
 571     }
 572   }
 573 
 574   res = _hrs.allocate_free_region(is_old);
 575 
 576   if (res == NULL) {
 577     if (G1ConcRegionFreeingVerbose) {
 578       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 579                              "res == NULL, trying the secondary_free_list");
 580     }
 581     res = new_region_try_secondary_free_list(is_old);
 582   }
 583   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 584     // Currently, only attempts to allocate GC alloc regions set
 585     // do_expand to true. So, we should only reach here during a
 586     // safepoint. If this assumption changes we might have to
 587     // reconsider the use of _expand_heap_after_alloc_failure.
 588     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 589 
 590     ergo_verbose1(ErgoHeapSizing,
 591                   "attempt heap expansion",
 592                   ergo_format_reason("region allocation request failed")
 593                   ergo_format_byte("allocation request"),
 594                   word_size * HeapWordSize);
 595     if (expand(word_size * HeapWordSize)) {
 596       // Given that expand() succeeded in expanding the heap, and we
 597       // always expand the heap by an amount aligned to the heap
 598       // region size, the free list should in theory not be empty.
 599       // In either case allocate_free_region() will check for NULL.
 600       res = _hrs.allocate_free_region(is_old);
 601     } else {
 602       _expand_heap_after_alloc_failure = false;
 603     }
 604   }
 605   return res;
 606 }
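new_region() tries the managed free list first, then the secondary free list, and only then attempts a heap expansion, latching _expand_heap_after_alloc_failure so that a failed expansion is not retried on every later request. A hedged, free-standing sketch of that fallback-and-latch shape follows; the ToyAllocator is invented purely for illustration and is not how G1 manages memory.

#include <cstddef>
#include <cstdlib>
#include <vector>

// Toy free-list allocator illustrating the "free list first, expand once on failure" shape.
class ToyAllocator {
  std::vector<void*> _free_list;
  bool _expand_after_alloc_failure = true;      // analogous to _expand_heap_after_alloc_failure

  void* take_from_free_list() {
    if (_free_list.empty()) return NULL;
    void* p = _free_list.back();
    _free_list.pop_back();
    return p;
  }

  bool expand(size_t block_bytes, int blocks) {
    for (int i = 0; i < blocks; i++) {
      void* p = std::malloc(block_bytes);
      if (p == NULL) return false;              // e.g. we ran out of memory
      _free_list.push_back(p);
    }
    return true;
  }

 public:
  void* allocate(size_t block_bytes, bool do_expand) {
    void* p = take_from_free_list();
    if (p == NULL && do_expand && _expand_after_alloc_failure) {
      if (expand(block_bytes, 4)) {
        p = take_from_free_list();              // the expansion refilled the list
      } else {
        _expand_after_alloc_failure = false;    // a failed expansion is not retried
      }
    }
    return p;
  }
};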
 607 
 608 HeapWord*
 609 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 610                                                            uint num_regions,
 611                                                            size_t word_size) {
 612   assert(first != G1_NO_HRS_INDEX, "pre-condition");
 613   assert(isHumongous(word_size), "word_size should be humongous");
 614   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 615 
 616   // Index of last region in the series + 1.
 617   uint last = first + num_regions;
 618 
 619   // We need to initialize the region(s) we just discovered. This is
 620   // a bit tricky given that it can happen concurrently with
 621   // refinement threads refining cards on these regions and
 622   // potentially wanting to refine the BOT as they are scanning
 623   // those cards (this can happen shortly after a cleanup; see CR
 624   // 6991377). So we have to set up the region(s) carefully and in
 625   // a specific order.
 626 
 627   // The word size sum of all the regions we will allocate.
 628   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 629   assert(word_size <= word_size_sum, "sanity");
 630 
 631   // This will be the "starts humongous" region.
 632   HeapRegion* first_hr = region_at(first);


 730   // match new_top.
 731   assert(hr == NULL ||
 732          (hr->end() == new_end && hr->top() == new_top), "sanity");
 733   check_bitmaps("Humongous Region Allocation", first_hr);
 734 
 735   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 736   _summary_bytes_used += first_hr->used();
 737   _humongous_set.add(first_hr);
 738 
 739   return new_obj;
 740 }
 741 
 742 // If the allocation could fit into existing free regions without expansion, try that.
 743 // Otherwise, if the heap can expand, do so.
 744 // Otherwise, if using ex regions might help, try with ex regions given back.
 745 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
 746   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 747 
 748   verify_region_sets_optional();
 749 
 750   uint first = G1_NO_HRS_INDEX;
 751   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 752 
 753   if (obj_regions == 1) {
 754     // Only one region to allocate, try to use a fast path by directly allocating
 755     // from the free lists. Do not try to expand here, we will potentially do that
 756     // later.
 757     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 758     if (hr != NULL) {
 759       first = hr->hrs_index();
 760     }
 761   } else {
 762     // We can't allocate humongous regions spanning more than one region while
 763     // cleanupComplete() is running, since some of the regions we find to be
 764     // empty might not yet be added to the free list. It is not straightforward
 765     // to know which list they are on so that we can remove them. We only
 766     // need to do this if we need to allocate more than one region to satisfy the
 767     // current humongous allocation request. If we are only allocating one region
 768     // we use the one-region region allocation code (see above), that already
 769     // potentially waits for regions from the secondary free list.
 770     wait_while_free_regions_coming();
 771     append_secondary_free_list_if_not_empty_with_lock();
 772 
 773     // Policy: Try only empty regions (i.e. already committed) first. Maybe we
 774     // are lucky enough to find some.
 775     first = _hrs.find_contiguous_only_empty(obj_regions);
 776     if (first != G1_NO_HRS_INDEX) {
 777       _hrs.allocate_free_regions_starting_at(first, obj_regions);
 778     }
 779   }
 780 
 781   if (first == G1_NO_HRS_INDEX) {
 782     // Policy: We could not find enough regions for the humongous object in the
 783     // free list. Look through the heap to find a mix of free and uncommitted regions.
 784     // If such regions exist, try expansion.
 785     first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
 786     if (first != G1_NO_HRS_INDEX) {
 787       // We found something. Make sure these regions are committed, i.e. expand
 788       // the heap. Alternatively we could do a defragmentation GC.
 789       ergo_verbose1(ErgoHeapSizing,
 790                     "attempt heap expansion",
 791                     ergo_format_reason("humongous allocation request failed")
 792                     ergo_format_byte("allocation request"),
 793                     word_size * HeapWordSize);
 794 
 795       _hrs.expand_at(first, obj_regions);
 796       g1_policy()->record_new_heap_size(num_regions());
 797 
 798 #ifdef ASSERT
 799       for (uint i = first; i < first + obj_regions; ++i) {
 800         HeapRegion* hr = region_at(i);
 801         assert(hr->is_empty(), "sanity");
 802         assert(is_on_master_free_list(hr), "sanity");
 803       }
 804 #endif
 805       _hrs.allocate_free_regions_starting_at(first, obj_regions);
 806     } else {
 807       // Policy: Potentially trigger a defragmentation GC.
 808     }
 809   }
 810 
 811   HeapWord* result = NULL;
 812   if (first != G1_NO_HRS_INDEX) {
 813     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
 814     assert(result != NULL, "it should always return a valid result");
 815 
 816     // A successful humongous object allocation changes the used space
 817     // information of the old generation so we need to recalculate the
 818     // sizes and update the jstat counters here.
 819     g1mm()->update_sizes();
 820   }
 821 
 822   verify_region_sets_optional();
 823 
 824   return result;
 825 }
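The obj_regions value above is align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords, which for a power-of-two region size is just a round-up division: how many whole regions does the humongous object need? A small self-contained example of that arithmetic, using an arbitrary 4096-word region size purely for illustration:

#include <cassert>
#include <cstddef>

// How many whole regions of grain_words are needed to hold word_size words.
static size_t regions_needed(size_t word_size, size_t grain_words) {
  return (word_size + grain_words - 1) / grain_words;   // round-up division
}

int main() {
  const size_t grain_words = 4096;                       // illustrative region size in words
  assert(regions_needed(1,                grain_words) == 1);
  assert(regions_needed(grain_words,      grain_words) == 1);
  assert(regions_needed(grain_words + 1,  grain_words) == 2);   // spills into a second region
  assert(regions_needed(10 * grain_words, grain_words) == 10);
  return 0;
}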
 826 
 827 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 828   assert_heap_not_locked_and_not_at_safepoint();
 829   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 830 
 831   unsigned int dummy_gc_count_before;
 832   int dummy_gclocker_retry_count = 0;


1227         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1228       } else if (hr->startsHumongous()) {
1229         if (hr->region_num() == 1) {
1230           // single humongous region
1231           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1232         } else {
1233           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1234         }
1235       } else {
1236         assert(hr->continuesHumongous(), "only way to get here");
1237         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1238       }
1239     }
1240     return false;
1241   }
1242 
1243   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1244     : _hr_printer(hr_printer) { }
1245 };
1246 
1247 void G1CollectedHeap::print_hrs_post_compaction() {
1248   PostCompactionPrinterClosure cl(hr_printer());
1249   heap_region_iterate(&cl);
1250 }
1251 
1252 bool G1CollectedHeap::do_collection(bool explicit_gc,
1253                                     bool clear_all_soft_refs,
1254                                     size_t word_size) {
1255   assert_at_safepoint(true /* should_be_vm_thread */);
1256 
1257   if (GC_locker::check_active_before_gc()) {
1258     return false;
1259   }
1260 
1261   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1262   gc_timer->register_gc_start();
1263 
1264   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1265   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1266 
1267   SvcGCMarker sgcm(SvcGCMarker::FULL);


1396       // re-enable reference discovery for the CM ref processor.
1397       // That will be done at the start of the next marking cycle.
1398       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1399       ref_processor_cm()->verify_no_references_recorded();
1400 
1401       reset_gc_time_stamp();
1402       // Since everything potentially moved, we will clear all remembered
1403       // sets, and clear all cards.  Later we will rebuild remembered
1404       // sets. We will also reset the GC time stamps of the regions.
1405       clear_rsets_post_compaction();
1406       check_gc_time_stamps();
1407 
1408       // Resize the heap if necessary.
1409       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1410 
1411       if (_hr_printer.is_active()) {
1412         // We should do this after we potentially resize the heap so
1413         // that all the COMMIT / UNCOMMIT events are generated before
1414         // the end GC event.
1415 
1416         print_hrs_post_compaction();
1417         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1418       }
1419 
1420       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1421       if (hot_card_cache->use_cache()) {
1422         hot_card_cache->reset_card_counts();
1423         hot_card_cache->reset_hot_cache();
1424       }
1425 
1426       // Rebuild remembered sets of all regions.
1427       if (G1CollectedHeap::use_parallel_gc_threads()) {
1428         uint n_workers =
1429           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1430                                                   workers()->active_workers(),
1431                                                   Threads::number_of_non_daemon_threads());
1432         assert(UseDynamicNumberOfGCThreads ||
1433                n_workers == workers()->total_workers(),
1434                "If not dynamic should be using all the workers");
1435         workers()->set_active_workers(n_workers);
1436         // Set parallel threads in the heap (_n_par_threads) only


1469 
1470 #ifdef TRACESPINNING
1471       ParallelTaskTerminator::print_termination_counts();
1472 #endif
1473 
1474       // Discard all rset updates
1475       JavaThread::dirty_card_queue_set().abandon_logs();
1476       assert(!G1DeferredRSUpdate
1477              || (G1DeferredRSUpdate &&
1478                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1479 
1480       _young_list->reset_sampled_info();
1481       // At this point there should be no regions in the
1482       // entire heap tagged as young.
1483       assert(check_young_list_empty(true /* check_heap */),
1484              "young list should be empty at this point");
1485 
1486       // Update the number of full collections that have been completed.
1487       increment_old_marking_cycles_completed(false /* concurrent */);
1488 
1489       _hrs.verify_optional();
1490       verify_region_sets_optional();
1491 
1492       verify_after_gc();
1493 
1494       // Clear the previous marking bitmap, if needed for bitmap verification.
1495       // Note we cannot do this when we clear the next marking bitmap in
1496       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1497       // objects marked during a full GC against the previous bitmap.
1498       // But we need to clear it before calling check_bitmaps below since
1499       // the full GC has compacted objects and updated TAMS but not updated
1500       // the prev bitmap.
1501       if (G1VerifyBitmaps) {
1502         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1503       }
1504       check_bitmaps("Full GC End");
1505 
1506       // Start a new incremental collection set for the next pause
1507       assert(g1_policy()->collection_set() == NULL, "must be");
1508       g1_policy()->start_incremental_cset_building();
1509 


1713   return NULL;
1714 }
1715 
1716 // Attempt to expand the heap sufficiently
1717 // to support an allocation of the given "word_size".  If
1718 // successful, perform the allocation and return the address of the
1719 // allocated block, or else "NULL".
1720 
1721 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1722   assert_at_safepoint(true /* should_be_vm_thread */);
1723 
1724   verify_region_sets_optional();
1725 
1726   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1727   ergo_verbose1(ErgoHeapSizing,
1728                 "attempt heap expansion",
1729                 ergo_format_reason("allocation request failed")
1730                 ergo_format_byte("allocation request"),
1731                 word_size * HeapWordSize);
1732   if (expand(expand_bytes)) {
1733     _hrs.verify_optional();
1734     verify_region_sets_optional();
1735     return attempt_allocation_at_safepoint(word_size,
1736                                  false /* expect_null_mutator_alloc_region */);
1737   }
1738   return NULL;
1739 }
1740 
1741 bool G1CollectedHeap::expand(size_t expand_bytes) {
1742   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1743   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1744                                        HeapRegion::GrainBytes);
1745   ergo_verbose2(ErgoHeapSizing,
1746                 "expand the heap",
1747                 ergo_format_byte("requested expansion amount")
1748                 ergo_format_byte("attempted expansion amount"),
1749                 expand_bytes, aligned_expand_bytes);
1750 
1751   if (is_maximal_no_gc()) {
1752     ergo_verbose0(ErgoHeapSizing,
1753                       "did not expand the heap",
1754                       ergo_format_reason("heap already fully expanded"));
1755     return false;
1756   }
1757 
1758   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1759   assert(regions_to_expand > 0, "Must expand by at least one region");
1760 
1761   uint expanded_by = _hrs.expand_by(regions_to_expand);
1762 
1763   if (expanded_by > 0) {
1764     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1765     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1766     g1_policy()->record_new_heap_size(num_regions());
1767   } else {
1768     ergo_verbose0(ErgoHeapSizing,
1769                   "did not expand the heap",
1770                   ergo_format_reason("heap expansion operation failed"));
1771     // The expansion of the virtual storage space was unsuccessful.
1772     // Let's see if it was because we ran out of swap.
1773     if (G1ExitOnExpansionFailure &&
1774         _hrs.available() >= regions_to_expand) {
1775       // We had head room...
1776       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1777     }
1778   }
1779   return regions_to_expand > 0;
1780 }
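expand() first aligns the request up to the OS page size and then up to the region size before turning it into a region count, which is why the actual expansion can be larger than what was asked for. A stand-alone sketch of that double alignment with placeholder page and region sizes (the real values come from the OS and from HeapRegion::GrainBytes):

#include <cassert>
#include <cstddef>

// Round value up to the next multiple of alignment (alignment must be a power of two).
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page_bytes   = 4 * 1024;                  // illustrative OS page size
  const size_t region_bytes = 1024 * 1024;               // illustrative region size

  size_t request = 1500 * 1024;                          // a 1.5 MB expansion request
  size_t aligned = align_up(align_up(request, page_bytes), region_bytes);
  assert(aligned == 2 * region_bytes);                   // rounded up to two whole regions

  size_t regions_to_expand = aligned / region_bytes;
  assert(regions_to_expand == 2);
  return 0;
}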
1781 
1782 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1783   size_t aligned_shrink_bytes =
1784     ReservedSpace::page_align_size_down(shrink_bytes);
1785   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1786                                          HeapRegion::GrainBytes);
1787   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1788 
1789   uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
1790   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1791 
1792   ergo_verbose3(ErgoHeapSizing,
1793                 "shrink the heap",
1794                 ergo_format_byte("requested shrinking amount")
1795                 ergo_format_byte("aligned shrinking amount")
1796                 ergo_format_byte("attempted shrinking amount"),
1797                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1798   if (num_regions_removed > 0) {
1799     g1_policy()->record_new_heap_size(num_regions());
1800   } else {
1801     ergo_verbose0(ErgoHeapSizing,
1802                   "did not shrink the heap",
1803                   ergo_format_reason("heap shrinking operation failed"));
1804   }
1805 }
1806 
1807 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1808   verify_region_sets_optional();
1809 
1810   // We should only reach here at the end of a Full GC which means we
1811   // should not be holding on to any GC alloc regions. The method
1812   // below will make sure of that and do any remaining clean up.
1813   abandon_gc_alloc_regions();
1814 
1815   // Instead of tearing down / rebuilding the free lists here, we
1816   // could instead use the remove_all_pending() method on free_list to
1817   // remove only the ones that we need to remove.
1818   tear_down_region_sets(true /* free_list_only */);
1819   shrink_helper(shrink_bytes);
1820   rebuild_region_sets(true /* free_list_only */);
1821 
1822   _hrs.verify_optional();
1823   verify_region_sets_optional();
1824 }
1825 
1826 // Public methods.
1827 
1828 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1829 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1830 #endif // _MSC_VER
1831 
1832 
1833 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1834   SharedHeap(policy_),
1835   _g1_policy(policy_),
1836   _dirty_card_queue_set(false),
1837   _into_cset_dirty_card_queue_set(false),
1838   _is_alive_closure_cm(this),
1839   _is_alive_closure_stw(this),
1840   _ref_processor_cm(NULL),
1841   _ref_processor_stw(NULL),
1842   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),


2011 
2012   // Reserve space for prev and next bitmap.
2013   size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
2014 
2015   ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2016   G1RegionToSpaceMapper* prev_bitmap_storage =
2017     G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
2018                                          os::vm_page_size(),
2019                                          HeapRegion::GrainBytes,
2020                                          CMBitMap::mark_distance(),
2021                                          mtGC);
2022 
2023   ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2024   G1RegionToSpaceMapper* next_bitmap_storage =
2025     G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
2026                                          os::vm_page_size(),
2027                                          HeapRegion::GrainBytes,
2028                                          CMBitMap::mark_distance(),
2029                                          mtGC);
2030 
2031   _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
2032   g1_barrier_set()->initialize(cardtable_storage);
2033   // Do later initialization work for concurrent refinement.
2034   _cg1r->init(card_counts_storage);
2035 
2036   // 6843694 - ensure that the maximum region index can fit
2037   // in the remembered set structures.
2038   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2039   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2040 
2041   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2042   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2043   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2044             "too many cards per region");
2045 
2046   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2047 
2048   _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
2049 
2050   _g1h = this;
2051 
2052   _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2053   _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2054 
2055   // Create the ConcurrentMark data structure and thread.
2056   // (Must do this late, so that "max_regions" is defined.)
2057   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
2058   if (_cm == NULL || !_cm->completed_initialization()) {
2059     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2060     return JNI_ENOMEM;
2061   }
2062   _cmThread = _cm->cmThread();
2063 
2064   // Initialize the from_card cache structure of HeapRegionRemSet.
2065   HeapRegionRemSet::init_heap(max_regions());
2066 
2067   // Now expand into the initial heap size.
2068   if (!expand(init_byte_size)) {
2069     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2070     return JNI_ENOMEM;
2071   }
2072 
2073   // Perform any initialization actions delegated to the policy.


2094                                       Shared_DirtyCardQ_lock,
2095                                       &JavaThread::dirty_card_queue_set());
2096   }
2097 
2098   // Initialize the card queue set used to hold cards containing
2099   // references into the collection set.
2100   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2101                                              DirtyCardQ_CBL_mon,
2102                                              DirtyCardQ_FL_lock,
2103                                              -1, // never trigger processing
2104                                              -1, // no limit on length
2105                                              Shared_DirtyCardQ_lock,
2106                                              &JavaThread::dirty_card_queue_set());
2107 
2108   // In case we're keeping closure specialization stats, initialize those
2109   // counts and that mechanism.
2110   SpecializationStats::clear();
2111 
2112   // Here we allocate the dummy HeapRegion that is required by the
2113   // G1AllocRegion class.
2114   HeapRegion* dummy_region = _hrs.get_dummy_region();
2115 
2116   // We'll re-use the same region whether the alloc region will
2117   // require BOT updates or not and, if it doesn't, then a non-young
2118   // region will complain that it cannot support allocations without
2119   // BOT updates. So we'll tag the dummy region as young to avoid that.
2120   dummy_region->set_young();
2121   // Make sure it's full.
2122   dummy_region->set_top(dummy_region->end());
2123   G1AllocRegion::setup(this, dummy_region);
2124 
2125   init_mutator_alloc_region();
2126 
2127   // Create the monitoring and management support so that
2128   // values in the heap have been properly initialized.
2129   _g1mm = new G1MonitoringSupport(this);
2130 
2131   G1StringDedup::initialize();
2132 
2133   return JNI_OK;
2134 }
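The guarantee at lines 2038-2039 above caps max_regions() by the largest index a signed RegionIdx_t can hold, computed as (1 << (bits - 1)) - 1; the CardIdx_t check that follows applies the same idea to cards within a region. Below is a free-standing sketch of just that arithmetic, with int16_t and int32_t used as stand-ins since the actual typedefs live in the remembered-set code:

#include <cassert>
#include <stdint.h>

static const int kBitsPerByte = 8;                       // HotSpot's BitsPerByte

// Largest value representable by a signed integer type of the given width,
// i.e. (1 << (bits - 1)) - 1, the same expression used for max_region_idx.
template <typename T>
static uint64_t max_signed_value() {
  return ((uint64_t)1 << (sizeof(T) * kBitsPerByte - 1)) - 1;
}

int main() {
  assert(max_signed_value<int16_t>() == 32767);          // 2^15 - 1
  assert(max_signed_value<int32_t>() == 2147483647u);    // 2^31 - 1
  // The guarantee in the source requires (max_regions() - 1) to stay within
  // this bound for whatever width RegionIdx_t actually has.
  return 0;
}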


2211 
2212   // STW ref processor
2213   _ref_processor_stw =
2214     new ReferenceProcessor(mr,    // span
2215                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2216                                 // mt processing
2217                            MAX2((int)ParallelGCThreads, 1),
2218                                 // degree of mt processing
2219                            (ParallelGCThreads > 1),
2220                                 // mt discovery
2221                            MAX2((int)ParallelGCThreads, 1),
2222                                 // degree of mt discovery
2223                            true,
2224                                 // Reference discovery is atomic
2225                            &_is_alive_closure_stw);
2226                                 // is alive closure
2227                                 // (for efficiency/performance)
2228 }
2229 
2230 size_t G1CollectedHeap::capacity() const {
2231   return _hrs.length() * HeapRegion::GrainBytes;
2232 }
2233 
2234 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2235   assert(!hr->continuesHumongous(), "pre-condition");
2236   hr->reset_gc_time_stamp();
2237   if (hr->startsHumongous()) {
2238     uint first_index = hr->hrs_index() + 1;
2239     uint last_index = hr->last_hc_index();
2240     for (uint i = first_index; i < last_index; i += 1) {
2241       HeapRegion* chr = region_at(i);
2242       assert(chr->continuesHumongous(), "sanity");
2243       chr->reset_gc_time_stamp();
2244     }
2245   }
2246 }
2247 
2248 #ifndef PRODUCT
2249 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2250 private:
2251   unsigned _gc_time_stamp;
2252   bool _failures;
2253 
2254 public:
2255   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2256     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2257 
2258   virtual bool doHeapRegion(HeapRegion* hr) {


2516           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2517 
2518         // Schedule a standard evacuation pause. We're setting word_size
2519         // to 0 which means that we are not requesting a post-GC allocation.
2520         VM_G1IncCollectionPause op(gc_count_before,
2521                                    0,     /* word_size */
2522                                    false, /* should_initiate_conc_mark */
2523                                    g1_policy()->max_pause_time_ms(),
2524                                    cause);
2525         VMThread::execute(&op);
2526       } else {
2527         // Schedule a Full GC.
2528         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2529         VMThread::execute(&op);
2530       }
2531     }
2532   } while (retry_gc);
2533 }
2534 
2535 bool G1CollectedHeap::is_in(const void* p) const {
2536   if (_hrs.reserved().contains(p)) {
2537     // Given that we know that p is in the reserved space,
2538     // heap_region_containing_raw() should successfully
2539     // return the containing region.
2540     HeapRegion* hr = heap_region_containing_raw(p);
2541     return hr->is_in(p);
2542   } else {
2543     return false;
2544   }
2545 }
2546 
2547 #ifdef ASSERT
2548 bool G1CollectedHeap::is_in_exact(const void* p) const {
2549   bool contains = reserved_region().contains(p);
2550   bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
2551   if (contains && available) {
2552     return true;
2553   } else {
2554     return false;
2555   }
2556 }
2557 #endif
2558 
2559 // Iteration functions.
2560 
2561 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2562 
2563 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2564   ExtendedOopClosure* _cl;
2565 public:
2566   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2567   bool doHeapRegion(HeapRegion* r) {
2568     if (!r->continuesHumongous()) {
2569       r->oop_iterate(_cl);
2570     }


2597 }
2598 
2599 // Calls a SpaceClosure on a HeapRegion.
2600 
2601 class SpaceClosureRegionClosure: public HeapRegionClosure {
2602   SpaceClosure* _cl;
2603 public:
2604   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2605   bool doHeapRegion(HeapRegion* r) {
2606     _cl->do_space(r);
2607     return false;
2608   }
2609 };
2610 
2611 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2612   SpaceClosureRegionClosure blk(cl);
2613   heap_region_iterate(&blk);
2614 }
2615 
2616 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2617   _hrs.iterate(cl);
2618 }
2619 
2620 void
2621 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2622                                                  uint worker_id,
2623                                                  uint num_workers,
2624                                                  jint claim_value) const {
2625   _hrs.par_iterate(cl, worker_id, num_workers, claim_value);
2626 }
2627 
2628 class ResetClaimValuesClosure: public HeapRegionClosure {
2629 public:
2630   bool doHeapRegion(HeapRegion* r) {
2631     r->set_claim_value(HeapRegion::InitialClaimValue);
2632     return false;
2633   }
2634 };
2635 
2636 void G1CollectedHeap::reset_heap_region_claim_values() {
2637   ResetClaimValuesClosure blk;
2638   heap_region_iterate(&blk);
2639 }
2640 
2641 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2642   ResetClaimValuesClosure blk;
2643   collection_set_iterate(&blk);
2644 }
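heap_region_iterate() and the small closures above follow the HeapRegionClosure convention: doHeapRegion() is invoked for each region, and a true return value asks the iteration to stop early. A minimal stand-alone sketch of that visitor-with-early-exit pattern follows; the Region and RegionClosure types below are invented for illustration, not the VM's.

#include <cstdio>
#include <vector>

struct Region { int index; bool claimed; };

// Analogue of HeapRegionClosure: return true to terminate the iteration early.
struct RegionClosure {
  virtual ~RegionClosure() {}
  virtual bool do_region(Region* r) = 0;
};

static void region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
  for (size_t i = 0; i < heap.size(); i++) {
    if (cl->do_region(&heap[i])) {
      return;                                            // closure asked us to stop
    }
  }
}

// Analogue of ResetClaimValuesClosure: visits every region, never stops early.
struct ResetClaimClosure : public RegionClosure {
  virtual bool do_region(Region* r) { r->claimed = false; return false; }
};

int main() {
  std::vector<Region> heap;
  for (int i = 0; i < 3; i++) { Region r = { i, true }; heap.push_back(r); }
  ResetClaimClosure cl;
  region_iterate(heap, &cl);
  for (size_t i = 0; i < heap.size(); i++) {
    std::printf("region %d claimed=%d\n", heap[i].index, (int)heap[i].claimed);
  }
  return 0;
}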
2645 


2825   while (cur != NULL) {
2826     HeapRegion* next = cur->next_in_collection_set();
2827     if (cl->doHeapRegion(cur) && false) {
2828       cl->incomplete();
2829       return;
2830     }
2831     cur = next;
2832   }
2833   cur = g1_policy()->collection_set();
2834   while (cur != r) {
2835     HeapRegion* next = cur->next_in_collection_set();
2836     if (cl->doHeapRegion(cur) && false) {
2837       cl->incomplete();
2838       return;
2839     }
2840     cur = next;
2841   }
2842 }
2843 
2844 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2845   HeapRegion* result = _hrs.next_region_in_heap(from);
2846   while (result != NULL && result->isHumongous()) {
2847     result = _hrs.next_region_in_heap(result);
2848   }
2849   return result;
2850 }
2851 
2852 Space* G1CollectedHeap::space_containing(const void* addr) const {
2853   return heap_region_containing(addr);
2854 }
2855 
2856 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2857   Space* sp = space_containing(addr);
2858   return sp->block_start(addr);
2859 }
2860 
2861 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2862   Space* sp = space_containing(addr);
2863   return sp->block_size(addr);
2864 }
2865 
2866 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2867   Space* sp = space_containing(addr);


2887 }
2888 
2889 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2890   // Return the remaining space in the cur alloc region, but not less than
2891   // the min TLAB size.
2892 
2893   // Also, this value can be at most the humongous object threshold,
2894   // since we can't allow tlabs to grow big enough to accommodate
2895   // humongous objects.
2896 
2897   HeapRegion* hr = _mutator_alloc_region.get();
2898   size_t max_tlab = max_tlab_size() * wordSize;
2899   if (hr == NULL) {
2900     return max_tlab;
2901   } else {
2902     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
2903   }
2904 }
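unsafe_max_tlab_alloc() clamps the free space of the current mutator allocation region between MinTLABSize and the humongous-object threshold, so a TLAB is never sized below the minimum nor large enough to become humongous. A worked sketch of that MIN2(MAX2(...)) clamp with illustrative byte values (the real limits come from MinTLABSize and max_tlab_size()):

#include <algorithm>
#include <cassert>
#include <cstddef>

// Clamp a region's remaining free bytes into [min_tlab, max_tlab].
static size_t tlab_limit(size_t region_free, size_t min_tlab, size_t max_tlab) {
  return std::min(std::max(region_free, min_tlab), max_tlab);
}

int main() {
  const size_t min_tlab = 2 * 1024;                      // illustrative MinTLABSize
  const size_t max_tlab = 512 * 1024;                    // illustrative humongous threshold

  assert(tlab_limit(100,         min_tlab, max_tlab) == min_tlab);    // never below the minimum
  assert(tlab_limit(64 * 1024,   min_tlab, max_tlab) == 64 * 1024);   // free space passes through
  assert(tlab_limit(4096 * 1024, min_tlab, max_tlab) == max_tlab);    // never above the maximum
  return 0;
}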
2905 
2906 size_t G1CollectedHeap::max_capacity() const {
2907   return _hrs.reserved().byte_size();
2908 }
2909 
2910 jlong G1CollectedHeap::millis_since_last_gc() {
2911   // assert(false, "NYI");
2912   return 0;
2913 }
2914 
2915 void G1CollectedHeap::prepare_for_verify() {
2916   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2917     ensure_parsability(false);
2918   }
2919   g1_rem_set()->prepare_for_verify();
2920 }
2921 
2922 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2923                                               VerifyOption vo) {
2924   switch (vo) {
2925   case VerifyOption_G1UsePrevMarking:
2926     return hr->obj_allocated_since_prev_marking(obj);
2927   case VerifyOption_G1UseNextMarking:


3416   }
3417   return false; // keep some compilers happy
3418 }
3419 
3420 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3421                                        const VerifyOption vo) const {
3422   switch (vo) {
3423   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3424   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3425   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
3426   default:                            ShouldNotReachHere();
3427   }
3428   return false; // keep some compilers happy
3429 }
3430 
3431 void G1CollectedHeap::print_on(outputStream* st) const {
3432   st->print(" %-20s", "garbage-first heap");
3433   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3434             capacity()/K, used_unlocked()/K);
3435   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3436             _hrs.reserved().start(),
3437             _hrs.reserved().start() + _hrs.length() * HeapRegion::GrainWords,
3438             _hrs.reserved().end());
3439   st->cr();
3440   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3441   uint young_regions = _young_list->length();
3442   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3443             (size_t) young_regions * HeapRegion::GrainBytes / K);
3444   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3445   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3446             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3447   st->cr();
3448   MetaspaceAux::print_on(st);
3449 }
3450 
3451 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3452   print_on(st);
3453 
3454   // Print the per-region information.
3455   st->cr();
3456   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3457                "HS=humongous(starts), HC=humongous(continues), "
3458                "CS=collection set, F=free, TS=gc time stamp, "


3661 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3662   HeapRegion* region = region_at(index);
3663   assert(region->startsHumongous(), "Must start a humongous object");
3664   return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3665 }
3666 
3667 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3668  private:
3669   size_t _total_humongous;
3670   size_t _candidate_humongous;
3671  public:
3672   RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
3673   }
3674 
3675   virtual bool doHeapRegion(HeapRegion* r) {
3676     if (!r->startsHumongous()) {
3677       return false;
3678     }
3679     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3680 
3681     uint region_idx = r->hrs_index();
3682     bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
3683     // Is_candidate already filters out humongous regions with some remembered set.
3684     // This will not lead to a humongous object being mistakenly kept alive, because
3685     // during young collection the remembered sets will only be added to.
3686     if (is_candidate) {
3687       g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3688       _candidate_humongous++;
3689     }
3690     _total_humongous++;
3691 
3692     return false;
3693   }
3694 
3695   size_t total_humongous() const { return _total_humongous; }
3696   size_t candidate_humongous() const { return _candidate_humongous; }
3697 };
3698 
3699 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3700   if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
3701     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);


4183       // that all the COMMIT events are generated before the end GC
4184       // event, and after we retire the GC alloc regions so that all
4185       // RETIRE events are generated before the end GC event.
4186       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4187 
4188 #ifdef TRACESPINNING
4189       ParallelTaskTerminator::print_termination_counts();
4190 #endif
4191 
4192       gc_epilogue(false);
4193     }
4194 
4195     // Print the remainder of the GC log output.
4196     log_gc_footer(os::elapsedTime() - pause_start_sec);
4197 
4198     // It is not yet safe to tell the concurrent mark to
4199     // start as we have some optional output below. We don't want the
4200     // output from the concurrent mark thread interfering with this
4201     // logging output either.
4202 
4203     _hrs.verify_optional();
4204     verify_region_sets_optional();
4205 
4206     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4207     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4208 
4209     print_heap_after_gc();
4210     trace_heap_after_gc(_gc_tracer_stw);
4211 
4212     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4213     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4214     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4215     // before any GC notifications are raised.
4216     g1mm()->update_sizes();
4217 
4218     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4219     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4220     _gc_timer_stw->register_gc_end();
4221     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4222   }
4223   // It should now be safe to tell the concurrent mark thread to start


6002   // reference processor's discovered lists. We need to do
6003   // this after the card table is cleaned (and verified) as
6004   // the act of enqueueing entries on to the pending list
6005   // will log these updates (and dirty their associated
6006   // cards). We need these updates logged to update any
6007   // RSets.
6008   enqueue_discovered_references(n_workers);
6009 
6010   if (G1DeferredRSUpdate) {
6011     redirty_logged_cards();
6012   }
6013   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
6014 }
6015 
6016 void G1CollectedHeap::free_region(HeapRegion* hr,
6017                                   FreeRegionList* free_list,
6018                                   bool par,
6019                                   bool locked) {
6020   assert(!hr->isHumongous(), "this is only for non-humongous regions");
6021   assert(!hr->is_empty(), "the region should not be empty");
6022   assert(_hrs.is_available(hr->hrs_index()), "region should be committed");
6023   assert(free_list != NULL, "pre-condition");
6024 
6025   if (G1VerifyBitmaps) {
6026     MemRegion mr(hr->bottom(), hr->end());
6027     concurrent_mark()->clearRangePrevBitmap(mr);
6028   }
6029 
6030   // Clear the card counts for this region.
6031   // Note: we only need to do this if the region is not young
6032   // (since we don't refine cards in young regions).
6033   if (!hr->is_young()) {
6034     _cg1r->hot_card_cache()->reset_card_counts(hr);
6035   }
6036   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
6037   free_list->add_ordered(hr);
6038 }
6039 
6040 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
6041                                      FreeRegionList* free_list,
6042                                      bool par) {
6043   assert(hr->startsHumongous(), "this is only for starts humongous regions");
6044   assert(free_list != NULL, "pre-condition");
6045 
6046   size_t hr_capacity = hr->capacity();
6047   // We need to read this before we make the region non-humongous,
6048   // otherwise the information will be gone.
6049   uint last_index = hr->last_hc_index();
6050   hr->set_notHumongous();
6051   free_region(hr, free_list, par);
6052 
6053   uint i = hr->hrs_index() + 1;
6054   while (i < last_index) {
6055     HeapRegion* curr_hr = region_at(i);
6056     assert(curr_hr->continuesHumongous(), "invariant");
6057     curr_hr->set_notHumongous();
6058     free_region(curr_hr, free_list, par);
6059     i += 1;
6060   }
6061 }
6062 
6063 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
6064                                        const HeapRegionSetCount& humongous_regions_removed) {
6065   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
6066     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
6067     _old_set.bulk_remove(old_regions_removed);
6068     _humongous_set.bulk_remove(humongous_regions_removed);
6069   }
6070 
6071 }
6072 
6073 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
6074   assert(list != NULL, "list can't be null");
6075   if (!list->is_empty()) {
6076     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
6077     _hrs.insert_list_into_free_list(list);
6078   }
6079 }
6080 
6081 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
6082   assert(_summary_bytes_used >= bytes,
6083          err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
6084                   _summary_bytes_used, bytes));
6085   _summary_bytes_used -= bytes;
6086 }
6087 
6088 class G1ParCleanupCTTask : public AbstractGangTask {
6089   G1SATBCardTableModRefBS* _ct_bs;
6090   G1CollectedHeap* _g1h;
6091   HeapRegion* volatile _su_head;
6092 public:
6093   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
6094                      G1CollectedHeap* g1h) :
6095     AbstractGangTask("G1 Par Cleanup CT Task"),
6096     _ct_bs(ct_bs), _g1h(g1h) { }
6097 


6426     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6427     // until the end of a concurrent mark.
6428     //
6429     // It is not required to check whether the object has been found dead by marking
6430     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
6431     // all objects allocated during that time are considered live.
6432     // SATB marking is even more conservative than the remembered set.
6433     // So if at this point in the collection there is no remembered set entry,
6434     // nobody has a reference to it.
6435     // At the start of collection we flush all refinement logs, and remembered sets
6436     // are completely up-to-date wrt references to the humongous object.
6437     //
6438     // Other implementation considerations:
6439     // - never consider object arrays: while they are a valid target, they have not
6440     // been observed to be used as temporary objects.
6441     // - they would also pose considerable effort for cleaning up the remembered
6442     // sets.
6443     // While this cleanup is not strictly necessary to be done (or done instantly),
6444     // given that their occurrence is very low, this saves us this additional
6445     // complexity.
6446     uint region_idx = r->hrs_index();
6447     if (g1h->humongous_is_live(region_idx) ||
6448         g1h->humongous_region_is_always_live(region_idx)) {
6449 
6450       if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6451         gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6452                                r->isHumongous(),
6453                                region_idx,
6454                                r->rem_set()->occupied(),
6455                                r->rem_set()->strong_code_roots_list_length(),
6456                                next_bitmap->isMarked(r->bottom()),
6457                                g1h->humongous_is_live(region_idx),
6458                                obj->is_objArray()
6459                               );
6460       }
6461 
6462       return false;
6463     }
6464 
6465     guarantee(!obj->is_objArray(),
6466               err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",


6665     return false;
6666   }
6667 
6668   ~TearDownRegionSetsClosure() {
6669     assert(_old_set->is_empty(), "post-condition");
6670   }
6671 };
6672 
6673 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6674   assert_at_safepoint(true /* should_be_vm_thread */);
6675 
6676   if (!free_list_only) {
6677     TearDownRegionSetsClosure cl(&_old_set);
6678     heap_region_iterate(&cl);
6679 
6680     // Note that emptying the _young_list is postponed and instead done as
6681     // the first step when rebuilding the region sets again. The reason for
6682     // this is that during a full GC string deduplication needs to know if
6683     // a collected region was young or old when the full GC was initiated.
6684   }
6685   _hrs.remove_all_free_regions();
6686 }
6687 
6688 class RebuildRegionSetsClosure : public HeapRegionClosure {
6689 private:
6690   bool            _free_list_only;
6691   HeapRegionSet*   _old_set;
6692   HeapRegionSeq*   _hrs;
6693   size_t          _total_used;
6694 
6695 public:
6696   RebuildRegionSetsClosure(bool free_list_only,
6697                            HeapRegionSet* old_set, HeapRegionSeq* hrs) :
6698     _free_list_only(free_list_only),
6699     _old_set(old_set), _hrs(hrs), _total_used(0) {
6700     assert(_hrs->num_free_regions() == 0, "pre-condition");
6701     if (!free_list_only) {
6702       assert(_old_set->is_empty(), "pre-condition");
6703     }
6704   }
6705 
6706   bool doHeapRegion(HeapRegion* r) {
6707     if (r->continuesHumongous()) {
6708       return false;
6709     }
6710 
6711     if (r->is_empty()) {
6712       // Add free regions to the free list
6713       _hrs->insert_into_free_list(r);
6714     } else if (!_free_list_only) {
6715       assert(!r->is_young(), "we should not come across young regions");
6716 
6717       if (r->isHumongous()) {
6718         // We ignore humongous regions. We leave the humongous set unchanged.
6719       } else {
6720         // The rest should be old, add them to the old set
6721         _old_set->add(r);
6722       }
6723       _total_used += r->used();
6724     }
6725 
6726     return false;
6727   }
6728 
6729   size_t total_used() {
6730     return _total_used;
6731   }
6732 };
6733 
6734 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6735   assert_at_safepoint(true /* should_be_vm_thread */);
6736 
6737   if (!free_list_only) {
6738     _young_list->empty_list();
6739   }
6740 
6741   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs);
6742   heap_region_iterate(&cl);
6743 
6744   if (!free_list_only) {
6745     _summary_bytes_used = cl.total_used();
6746   }
6747   assert(_summary_bytes_used == recalculate_used(),
6748          err_msg("inconsistent _summary_bytes_used, "
6749                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6750                  _summary_bytes_used, recalculate_used()));
6751 }
6752 
6753 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6754   _refine_cte_cl->set_concurrent(concurrent);
6755 }
6756 
6757 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6758   HeapRegion* hr = heap_region_containing(p);
6759   return hr->is_in(p);
6760 }
6761 


6911                                MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
6912 
6913       // Skip allocation if there is not enough space to allocate even the smallest
6914       // possible object. In this case this region will not be retained, so the
6915       // original problem cannot occur.
6916       if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
6917         HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
6918         CollectedHeap::fill_with_object(dummy, to_allocate_words);
6919       }
6920     }
6921   }
6922   return G1AllocRegion::release();
6923 }
6924 
6925 // Heap region set verification
6926 
6927 class VerifyRegionListsClosure : public HeapRegionClosure {
6928 private:
6929   HeapRegionSet*   _old_set;
6930   HeapRegionSet*   _humongous_set;
6931   HeapRegionSeq*   _hrs;
6932 
6933 public:
6934   HeapRegionSetCount _old_count;
6935   HeapRegionSetCount _humongous_count;
6936   HeapRegionSetCount _free_count;
6937 
6938   VerifyRegionListsClosure(HeapRegionSet* old_set,
6939                            HeapRegionSet* humongous_set,
6940                            HeapRegionSeq* hrs) :
6941     _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs),
6942     _old_count(), _humongous_count(), _free_count(){ }
6943 
6944   bool doHeapRegion(HeapRegion* hr) {
6945     if (hr->continuesHumongous()) {
6946       return false;
6947     }
6948 
6949     if (hr->is_young()) {
6950       // TODO
6951     } else if (hr->startsHumongous()) {
6952       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index()));
6953       _humongous_count.increment(1u, hr->capacity());
6954     } else if (hr->is_empty()) {
6955       assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index()));
6956       _free_count.increment(1u, hr->capacity());
6957     } else {
6958       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index()));
6959       _old_count.increment(1u, hr->capacity());
6960     }
6961     return false;
6962   }
6963 
6964   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) {
6965     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6966     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6967         old_set->total_capacity_bytes(), _old_count.capacity()));
6968 
6969     guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6970     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6971         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6972 
6973     guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6974     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6975         free_list->total_capacity_bytes(), _free_count.capacity()));
6976   }
6977 };
6978 
6979 void G1CollectedHeap::verify_region_sets() {
6980   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6981 
6982   // First, check the explicit lists.
6983   _hrs.verify();
6984   {
6985     // Given that a concurrent operation might be adding regions to
6986     // the secondary free list we have to take the lock before
6987     // verifying it.
6988     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6989     _secondary_free_list.verify_list();
6990   }
6991 
6992   // If a concurrent region freeing operation is in progress it will
6993   // be difficult to correctly attribute any free regions we come
6994   // across to the correct free list given that they might belong to
6995   // one of several (free_list, secondary_free_list, any local lists,
6996   // etc.). So, if that's the case we will skip the rest of the
6997   // verification operation. Alternatively, waiting for the concurrent
6998   // operation to complete will have a non-trivial effect on the GC's
6999   // operation (no concurrent operation will last longer than the
7000   // interval between two calls to verification) and it might hide
7001   // any issues that we would like to catch during testing.
7002   if (free_regions_coming()) {
7003     return;
7004   }
7005 
7006   // Make sure we append the secondary_free_list on the free_list so
7007   // that all free regions we will come across can be safely
7008   // attributed to the free_list.
7009   append_secondary_free_list_if_not_empty_with_lock();
7010 
7011   // Finally, make sure that the region accounting in the lists is
7012   // consistent with what we see in the heap.
7013 
7014   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs);
7015   heap_region_iterate(&cl);
7016   cl.verify_counts(&_old_set, &_humongous_set, &_hrs);
7017 }
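verify_region_sets() walks every region with VerifyRegionListsClosure, tallies what it actually finds in each category, and then cross-checks those tallies against the lengths the region sets claim to have. A toy version of that count-then-compare idea follows; the data and names are invented for illustration only.

#include <cassert>
#include <vector>

enum Kind { Old, Humongous, Free };
struct Region { Kind kind; };

struct Counts { unsigned old_n; unsigned humongous_n; unsigned free_n; };

// Walk the "heap" and count what is actually there, like VerifyRegionListsClosure does.
static Counts tally(const std::vector<Region>& heap) {
  Counts c = { 0, 0, 0 };
  for (size_t i = 0; i < heap.size(); i++) {
    switch (heap[i].kind) {
      case Old:       c.old_n++;       break;
      case Humongous: c.humongous_n++; break;
      case Free:      c.free_n++;      break;
    }
  }
  return c;
}

int main() {
  Region regions[] = { {Old}, {Free}, {Humongous}, {Old}, {Free} };
  std::vector<Region> heap(regions, regions + 5);
  // Lengths the sets claim to hold (normally maintained incrementally by add/remove).
  unsigned old_set_len = 2, humongous_set_len = 1, free_list_len = 2;

  Counts c = tally(heap);
  assert(c.old_n == old_set_len);                        // analogous to the guarantee() checks
  assert(c.humongous_n == humongous_set_len);
  assert(c.free_n == free_list_len);
  return 0;
}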
7018 
7019 // Optimized nmethod scanning
7020 
7021 class RegisterNMethodOopClosure: public OopClosure {
7022   G1CollectedHeap* _g1h;
7023   nmethod* _nm;
7024 
7025   template <class T> void do_oop_work(T* p) {
7026     T heap_oop = oopDesc::load_heap_oop(p);
7027     if (!oopDesc::is_null(heap_oop)) {
7028       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
7029       HeapRegion* hr = _g1h->heap_region_containing(obj);
7030       assert(!hr->continuesHumongous(),
7031              err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
7032                      " starting at "HR_FORMAT,
7033                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
7034 
7035       // HeapRegion::add_strong_code_root() avoids adding duplicate
7036       // entries but having duplicates is OK since we "mark" nmethods




 511 
 512 G1CollectedHeap* G1CollectedHeap::_g1h;
 513 
 514 // Private methods.
 515 
 516 HeapRegion*
 517 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 518   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 519   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 520     if (!_secondary_free_list.is_empty()) {
 521       if (G1ConcRegionFreeingVerbose) {
 522         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 523                                "secondary_free_list has %u entries",
 524                                _secondary_free_list.length());
 525       }
 526       // It looks as if there are free regions available on the
 527       // secondary_free_list. Let's move them to the free_list and try
 528       // again to allocate from it.
 529       append_secondary_free_list();
 530 
 531       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 532              "empty we should have moved at least one entry to the free_list");
 533       HeapRegion* res = _hrm.allocate_free_region(is_old);
 534       if (G1ConcRegionFreeingVerbose) {
 535         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 536                                "allocated "HR_FORMAT" from secondary_free_list",
 537                                HR_FORMAT_PARAMS(res));
 538       }
 539       return res;
 540     }
 541 
 542     // Wait here until we get notified either when (a) there are no
 543     // more free regions coming or (b) some regions have been moved on
 544     // the secondary_free_list.
 545     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 546   }
 547 
 548   if (G1ConcRegionFreeingVerbose) {
 549     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 550                            "could not allocate from secondary_free_list");
 551   }
 552   return NULL;
 553 }
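
The wait loop above is a producer/consumer hand-off: the allocating thread either drains whatever the concurrent cleanup thread has published on the secondary free list, or blocks on SecondaryFreeList_lock until more regions arrive or freeing ends. A minimal standalone sketch of the same pattern follows, using standard C++ primitives; every name in it is hypothetical and it is not HotSpot code.

// Illustrative sketch only (not HotSpot code); all names below are hypothetical.
#include <condition_variable>
#include <deque>
#include <mutex>

struct Region { int id; };

class RegionPool {
  std::mutex              _lock;          // plays the role of SecondaryFreeList_lock
  std::condition_variable _cv;
  std::deque<Region*>     _secondary;     // filled by a concurrent cleanup thread
  std::deque<Region*>     _free;          // the primary free list
  bool                    _freeing_in_progress;

public:
  RegionPool() : _freeing_in_progress(true) { }

  // Mirrors new_region_try_secondary_free_list(): drain whatever the cleanup
  // thread has published, or block until it publishes more or finishes.
  Region* take_from_secondary() {
    std::unique_lock<std::mutex> x(_lock);
    while (!_secondary.empty() || _freeing_in_progress) {
      if (!_secondary.empty()) {
        // append_secondary_free_list(): splice everything onto the free list.
        _free.insert(_free.end(), _secondary.begin(), _secondary.end());
        _secondary.clear();
        Region* res = _free.front();      // non-empty by construction here
        _free.pop_front();
        return res;
      }
      _cv.wait(x);  // woken when (a) freeing ends or (b) regions were added
    }
    return nullptr; // nothing available and no concurrent freeing under way
  }

  // Called by the cleanup thread.
  void publish(Region* r) {
    std::lock_guard<std::mutex> x(_lock);
    _secondary.push_back(r);
    _cv.notify_all();
  }
  void freeing_done() {
    std::lock_guard<std::mutex> x(_lock);
    _freeing_in_progress = false;
    _cv.notify_all();
  }
};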
 554 
 555 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 556   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 557          "the only time we use this to allocate a humongous region is "
 558          "when we are allocating a single humongous region");
 559 
 560   HeapRegion* res;
 561   if (G1StressConcRegionFreeing) {
 562     if (!_secondary_free_list.is_empty()) {
 563       if (G1ConcRegionFreeingVerbose) {
 564         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 565                                "forced to look at the secondary_free_list");
 566       }
 567       res = new_region_try_secondary_free_list(is_old);
 568       if (res != NULL) {
 569         return res;
 570       }
 571     }
 572   }
 573 
 574   res = _hrm.allocate_free_region(is_old);
 575 
 576   if (res == NULL) {
 577     if (G1ConcRegionFreeingVerbose) {
 578       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 579                              "res == NULL, trying the secondary_free_list");
 580     }
 581     res = new_region_try_secondary_free_list(is_old);
 582   }
 583   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 584     // Currently, only attempts to allocate GC alloc regions set
 585     // do_expand to true. So, we should only reach here during a
 586     // safepoint. If this assumption changes we might have to
 587     // reconsider the use of _expand_heap_after_alloc_failure.
 588     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 589 
 590     ergo_verbose1(ErgoHeapSizing,
 591                   "attempt heap expansion",
 592                   ergo_format_reason("region allocation request failed")
 593                   ergo_format_byte("allocation request"),
 594                   word_size * HeapWordSize);
 595     if (expand(word_size * HeapWordSize)) {
 596       // Given that expand() succeeded in expanding the heap, and we
 597       // always expand the heap by an amount aligned to the heap
 598       // region size, the free list should in theory not be empty.
 599       // In either case allocate_free_region() will check for NULL.
 600       res = _hrm.allocate_free_region(is_old);
 601     } else {
 602       _expand_heap_after_alloc_failure = false;
 603     }
 604   }
 605   return res;
 606 }
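
new_region() also shows the expansion latch: only GC alloc region requests pass do_expand, and once one expansion attempt fails, _expand_heap_after_alloc_failure suppresses further attempts in the same pause. Below is a simplified sketch of that latch with stand-in names; the allocation and expansion stubs fail unconditionally purely to exercise the path.

// Simplified sketch of the expansion latch; allocate_region() and expand()
// are stand-ins that fail unconditionally just to exercise the path.
#include <cstddef>

struct LatchedHeap {
  bool expand_after_alloc_failure;

  LatchedHeap() : expand_after_alloc_failure(true) { }

  void* allocate_region()        { return nullptr; }  // stand-in: no free region
  bool  expand(size_t /*bytes*/) { return false; }    // stand-in: expansion failed

  void* allocate_with_expand(size_t bytes) {
    void* res = allocate_region();
    if (res == nullptr && expand_after_alloc_failure) {
      if (expand(bytes)) {
        res = allocate_region();             // retry after growing the heap
      } else {
        expand_after_alloc_failure = false;  // latch: skip further attempts this pause
      }
    }
    return res;
  }
};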
 607 
 608 HeapWord*
 609 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 610                                                            uint num_regions,
 611                                                            size_t word_size) {
 612   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 613   assert(isHumongous(word_size), "word_size should be humongous");
 614   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 615 
 616   // Index of last region in the series + 1.
 617   uint last = first + num_regions;
 618 
 619   // We need to initialize the region(s) we just discovered. This is
 620   // a bit tricky given that it can happen concurrently with
 621   // refinement threads refining cards on these regions and
 622   // potentially wanting to refine the BOT as they are scanning
 623   // those cards (this can happen shortly after a cleanup; see CR
 624   // 6991377). So we have to set up the region(s) carefully and in
 625   // a specific order.
 626 
 627   // The word size sum of all the regions we will allocate.
 628   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 629   assert(word_size <= word_size_sum, "sanity");
 630 
 631   // This will be the "starts humongous" region.
 632   HeapRegion* first_hr = region_at(first);


 730   // match new_top.
 731   assert(hr == NULL ||
 732          (hr->end() == new_end && hr->top() == new_top), "sanity");
 733   check_bitmaps("Humongous Region Allocation", first_hr);
 734 
 735   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 736   _summary_bytes_used += first_hr->used();
 737   _humongous_set.add(first_hr);
 738 
 739   return new_obj;
 740 }
 741 
 742 // If could fit into free regions w/o expansion, try.
 743 // Otherwise, if can expand, do so.
 744 // Otherwise, if using ex regions might help, try with ex given back.
 745 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
 746   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 747 
 748   verify_region_sets_optional();
 749 
 750   uint first = G1_NO_HRM_INDEX;
 751   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 752 
 753   if (obj_regions == 1) {
 754     // Only one region to allocate, try to use a fast path by directly allocating
 755     // from the free lists. Do not try to expand here, we will potentially do that
 756     // later.
 757     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 758     if (hr != NULL) {
 759       first = hr->hrm_index();
 760     }
 761   } else {
 762     // We can't allocate humongous regions spanning more than one region while
 763     // cleanupComplete() is running, since some of the regions we find to be
 764     // empty might not yet be added to the free list. It is not straightforward
 765     // to know in which list they are on so that we can remove them. We only
 766     // need to do this if we need to allocate more than one region to satisfy the
 767     // current humongous allocation request. If we are only allocating one region
 768     // we use the one-region region allocation code (see above), that already
 769     // potentially waits for regions from the secondary free list.
 770     wait_while_free_regions_coming();
 771     append_secondary_free_list_if_not_empty_with_lock();
 772 
 773     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 774     // are lucky enough to find some.
 775     first = _hrm.find_contiguous_only_empty(obj_regions);
 776     if (first != G1_NO_HRM_INDEX) {
 777       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 778     }
 779   }
 780 
 781   if (first == G1_NO_HRM_INDEX) {
 782     // Policy: We could not find enough regions for the humongous object in the
 783     // free list. Look through the heap to find a mix of free and uncommitted regions.
 784     // If so, try expansion.
 785     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 786     if (first != G1_NO_HRM_INDEX) {
 787       // We found something. Make sure these regions are committed, i.e. expand
 788       // the heap. Alternatively we could do a defragmentation GC.
 789       ergo_verbose1(ErgoHeapSizing,
 790                     "attempt heap expansion",
 791                     ergo_format_reason("humongous allocation request failed")
 792                     ergo_format_byte("allocation request"),
 793                     word_size * HeapWordSize);
 794 
 795       _hrm.expand_at(first, obj_regions);
 796       g1_policy()->record_new_heap_size(num_regions());
 797 
 798 #ifdef ASSERT
 799       for (uint i = first; i < first + obj_regions; ++i) {
 800         HeapRegion* hr = region_at(i);
 801         assert(hr->is_empty(), "sanity");
 802         assert(is_on_master_free_list(hr), "sanity");
 803       }
 804 #endif
 805       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 806     } else {
 807       // Policy: Potentially trigger a defragmentation GC.
 808     }
 809   }
 810 
 811   HeapWord* result = NULL;
 812   if (first != G1_NO_HRM_INDEX) {
 813     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
 814     assert(result != NULL, "it should always return a valid result");
 815 
 816     // A successful humongous object allocation changes the used space
 817     // information of the old generation so we need to recalculate the
 818     // sizes and update the jstat counters here.
 819     g1mm()->update_sizes();
 820   }
 821 
 822   verify_region_sets_optional();
 823 
 824   return result;
 825 }
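
The number of regions needed for a humongous request is word_size rounded up to a multiple of HeapRegion::GrainWords and divided by GrainWords. A small worked example follows; the 1 MB region size (131072 words on a 64-bit VM) and the 300000-word request are assumptions made for illustration.

// Worked example with assumed values: 1 MB regions on a 64-bit VM give
// GrainWords = 1048576 / 8 = 131072, so a 300000-word humongous request
// needs ceil(300000 / 131072) = 3 regions, matching obj_regions above.
#include <cstddef>
#include <cstdio>

static size_t align_up(size_t value, size_t alignment) {  // alignment: power of two
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t grain_words = 131072;   // assumed words per region
  const size_t word_size   = 300000;   // assumed humongous request size in words
  size_t obj_regions = align_up(word_size, grain_words) / grain_words;
  std::printf("regions needed: %zu\n", obj_regions);       // prints 3
  return 0;
}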
 826 
 827 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 828   assert_heap_not_locked_and_not_at_safepoint();
 829   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 830 
 831   unsigned int dummy_gc_count_before;
 832   int dummy_gclocker_retry_count = 0;


1227         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1228       } else if (hr->startsHumongous()) {
1229         if (hr->region_num() == 1) {
1230           // single humongous region
1231           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1232         } else {
1233           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1234         }
1235       } else {
1236         assert(hr->continuesHumongous(), "only way to get here");
1237         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1238       }
1239     }
1240     return false;
1241   }
1242 
1243   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1244     : _hr_printer(hr_printer) { }
1245 };
1246 
1247 void G1CollectedHeap::print_hrm_post_compaction() {
1248   PostCompactionPrinterClosure cl(hr_printer());
1249   heap_region_iterate(&cl);
1250 }
1251 
1252 bool G1CollectedHeap::do_collection(bool explicit_gc,
1253                                     bool clear_all_soft_refs,
1254                                     size_t word_size) {
1255   assert_at_safepoint(true /* should_be_vm_thread */);
1256 
1257   if (GC_locker::check_active_before_gc()) {
1258     return false;
1259   }
1260 
1261   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1262   gc_timer->register_gc_start();
1263 
1264   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1265   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1266 
1267   SvcGCMarker sgcm(SvcGCMarker::FULL);


1396       // re-enable reference discovery for the CM ref processor.
1397       // That will be done at the start of the next marking cycle.
1398       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1399       ref_processor_cm()->verify_no_references_recorded();
1400 
1401       reset_gc_time_stamp();
1402       // Since everything potentially moved, we will clear all remembered
1403       // sets, and clear all cards.  Later we will rebuild remembered
1404       // sets. We will also reset the GC time stamps of the regions.
1405       clear_rsets_post_compaction();
1406       check_gc_time_stamps();
1407 
1408       // Resize the heap if necessary.
1409       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1410 
1411       if (_hr_printer.is_active()) {
1412         // We should do this after we potentially resize the heap so
1413         // that all the COMMIT / UNCOMMIT events are generated before
1414         // the end GC event.
1415 
1416         print_hrm_post_compaction();
1417         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1418       }
1419 
1420       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1421       if (hot_card_cache->use_cache()) {
1422         hot_card_cache->reset_card_counts();
1423         hot_card_cache->reset_hot_cache();
1424       }
1425 
1426       // Rebuild remembered sets of all regions.
1427       if (G1CollectedHeap::use_parallel_gc_threads()) {
1428         uint n_workers =
1429           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1430                                                   workers()->active_workers(),
1431                                                   Threads::number_of_non_daemon_threads());
1432         assert(UseDynamicNumberOfGCThreads ||
1433                n_workers == workers()->total_workers(),
1434                "If not dynamic should be using all the workers");
1435         workers()->set_active_workers(n_workers);
1436         // Set parallel threads in the heap (_n_par_threads) only


1469 
1470 #ifdef TRACESPINNING
1471       ParallelTaskTerminator::print_termination_counts();
1472 #endif
1473 
1474       // Discard all rset updates
1475       JavaThread::dirty_card_queue_set().abandon_logs();
1476       assert(!G1DeferredRSUpdate
1477              || (G1DeferredRSUpdate &&
1478                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1479 
1480       _young_list->reset_sampled_info();
1481       // At this point there should be no regions in the
1482       // entire heap tagged as young.
1483       assert(check_young_list_empty(true /* check_heap */),
1484              "young list should be empty at this point");
1485 
1486       // Update the number of full collections that have been completed.
1487       increment_old_marking_cycles_completed(false /* concurrent */);
1488 
1489       _hrm.verify_optional();
1490       verify_region_sets_optional();
1491 
1492       verify_after_gc();
1493 
1494       // Clear the previous marking bitmap, if needed for bitmap verification.
1495       // Note we cannot do this when we clear the next marking bitmap in
1496       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1497       // objects marked during a full GC against the previous bitmap.
1498       // But we need to clear it before calling check_bitmaps below since
1499       // the full GC has compacted objects and updated TAMS but not updated
1500       // the prev bitmap.
1501       if (G1VerifyBitmaps) {
1502         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1503       }
1504       check_bitmaps("Full GC End");
1505 
1506       // Start a new incremental collection set for the next pause
1507       assert(g1_policy()->collection_set() == NULL, "must be");
1508       g1_policy()->start_incremental_cset_building();
1509 


1713   return NULL;
1714 }
1715 
1716 // Attempt to expand the heap sufficiently
1717 // to support an allocation of the given "word_size".  If
1718 // successful, perform the allocation and return the address of the
1719 // allocated block, or else "NULL".
1720 
1721 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1722   assert_at_safepoint(true /* should_be_vm_thread */);
1723 
1724   verify_region_sets_optional();
1725 
1726   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1727   ergo_verbose1(ErgoHeapSizing,
1728                 "attempt heap expansion",
1729                 ergo_format_reason("allocation request failed")
1730                 ergo_format_byte("allocation request"),
1731                 word_size * HeapWordSize);
1732   if (expand(expand_bytes)) {
1733     _hrm.verify_optional();
1734     verify_region_sets_optional();
1735     return attempt_allocation_at_safepoint(word_size,
1736                                  false /* expect_null_mutator_alloc_region */);
1737   }
1738   return NULL;
1739 }
1740 
1741 bool G1CollectedHeap::expand(size_t expand_bytes) {
1742   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1743   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1744                                        HeapRegion::GrainBytes);
1745   ergo_verbose2(ErgoHeapSizing,
1746                 "expand the heap",
1747                 ergo_format_byte("requested expansion amount")
1748                 ergo_format_byte("attempted expansion amount"),
1749                 expand_bytes, aligned_expand_bytes);
1750 
1751   if (is_maximal_no_gc()) {
1752     ergo_verbose0(ErgoHeapSizing,
1753                       "did not expand the heap",
1754                       ergo_format_reason("heap already fully expanded"));
1755     return false;
1756   }
1757 
1758   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1759   assert(regions_to_expand > 0, "Must expand by at least one region");
1760 
1761   uint expanded_by = _hrm.expand_by(regions_to_expand);
1762 
1763   if (expanded_by > 0) {
1764     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1765     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1766     g1_policy()->record_new_heap_size(num_regions());
1767   } else {
1768     ergo_verbose0(ErgoHeapSizing,
1769                   "did not expand the heap",
1770                   ergo_format_reason("heap expansion operation failed"));
1771     // The expansion of the virtual storage space was unsuccessful.
1772     // Let's see if it was because we ran out of swap.
1773     if (G1ExitOnExpansionFailure &&
1774         _hrm.available() >= regions_to_expand) {
1775       // We had head room...
1776       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1777     }
1778   }
1779   return regions_to_expand > 0;
1780 }
1781 
1782 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1783   size_t aligned_shrink_bytes =
1784     ReservedSpace::page_align_size_down(shrink_bytes);
1785   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1786                                          HeapRegion::GrainBytes);
1787   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1788 
1789   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1790   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1791 
1792   ergo_verbose3(ErgoHeapSizing,
1793                 "shrink the heap",
1794                 ergo_format_byte("requested shrinking amount")
1795                 ergo_format_byte("aligned shrinking amount")
1796                 ergo_format_byte("attempted shrinking amount"),
1797                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1798   if (num_regions_removed > 0) {
1799     g1_policy()->record_new_heap_size(num_regions());
1800   } else {
1801     ergo_verbose0(ErgoHeapSizing,
1802                   "did not shrink the heap",
1803                   ergo_format_reason("heap shrinking operation failed"));
1804   }
1805 }
1806 
1807 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1808   verify_region_sets_optional();
1809 
1810   // We should only reach here at the end of a Full GC which means we
1811   // should not not be holding to any GC alloc regions. The method
1812   // below will make sure of that and do any remaining clean up.
1813   abandon_gc_alloc_regions();
1814 
1815   // Instead of tearing down / rebuilding the free lists here, we
1816   // could instead use the remove_all_pending() method on free_list to
1817   // remove only the ones that we need to remove.
1818   tear_down_region_sets(true /* free_list_only */);
1819   shrink_helper(shrink_bytes);
1820   rebuild_region_sets(true /* free_list_only */);
1821 
1822   _hrm.verify_optional();
1823   verify_region_sets_optional();
1824 }
1825 
1826 // Public methods.
1827 
1828 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1829 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1830 #endif // _MSC_VER
1831 
1832 
1833 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1834   SharedHeap(policy_),
1835   _g1_policy(policy_),
1836   _dirty_card_queue_set(false),
1837   _into_cset_dirty_card_queue_set(false),
1838   _is_alive_closure_cm(this),
1839   _is_alive_closure_stw(this),
1840   _ref_processor_cm(NULL),
1841   _ref_processor_stw(NULL),
1842   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),


2011 
2012   // Reserve space for prev and next bitmap.
2013   size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
2014 
2015   ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2016   G1RegionToSpaceMapper* prev_bitmap_storage =
2017     G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
2018                                          os::vm_page_size(),
2019                                          HeapRegion::GrainBytes,
2020                                          CMBitMap::mark_distance(),
2021                                          mtGC);
2022 
2023   ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2024   G1RegionToSpaceMapper* next_bitmap_storage =
2025     G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
2026                                          os::vm_page_size(),
2027                                          HeapRegion::GrainBytes,
2028                                          CMBitMap::mark_distance(),
2029                                          mtGC);
2030 
2031   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
2032   g1_barrier_set()->initialize(cardtable_storage);
2033   // Do later initialization work for concurrent refinement.
2034   _cg1r->init(card_counts_storage);
2035 
2036   // 6843694 - ensure that the maximum region index can fit
2037   // in the remembered set structures.
2038   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2039   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2040 
2041   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2042   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2043   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2044             "too many cards per region");
2045 
2046   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2047 
2048   _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
2049 
2050   _g1h = this;
2051 
2052   _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
2053   _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
2054 
2055   // Create the ConcurrentMark data structure and thread.
2056   // (Must do this late, so that "max_regions" is defined.)
2057   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
2058   if (_cm == NULL || !_cm->completed_initialization()) {
2059     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2060     return JNI_ENOMEM;
2061   }
2062   _cmThread = _cm->cmThread();
2063 
2064   // Initialize the from_card cache structure of HeapRegionRemSet.
2065   HeapRegionRemSet::init_heap(max_regions());
2066 
2067   // Now expand into the initial heap size.
2068   if (!expand(init_byte_size)) {
2069     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2070     return JNI_ENOMEM;
2071   }
2072 
2073   // Perform any initialization actions delegated to the policy.


2094                                       Shared_DirtyCardQ_lock,
2095                                       &JavaThread::dirty_card_queue_set());
2096   }
2097 
2098   // Initialize the card queue set used to hold cards containing
2099   // references into the collection set.
2100   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2101                                              DirtyCardQ_CBL_mon,
2102                                              DirtyCardQ_FL_lock,
2103                                              -1, // never trigger processing
2104                                              -1, // no limit on length
2105                                              Shared_DirtyCardQ_lock,
2106                                              &JavaThread::dirty_card_queue_set());
2107 
2108   // In case we're keeping closure specialization stats, initialize those
2109   // counts and that mechanism.
2110   SpecializationStats::clear();
2111 
2112   // Here we allocate the dummy HeapRegion that is required by the
2113   // G1AllocRegion class.
2114   HeapRegion* dummy_region = _hrm.get_dummy_region();
2115 
2116   // We'll re-use the same region whether the alloc region will
2117   // require BOT updates or not and, if it doesn't, then a non-young
2118   // region will complain that it cannot support allocations without
2119   // BOT updates. So we'll tag the dummy region as young to avoid that.
2120   dummy_region->set_young();
2121   // Make sure it's full.
2122   dummy_region->set_top(dummy_region->end());
2123   G1AllocRegion::setup(this, dummy_region);
2124 
2125   init_mutator_alloc_region();
2126 
2127   // Create the monitoring and management support so that
2128   // values in the heap have been properly initialized.
2129   _g1mm = new G1MonitoringSupport(this);
2130 
2131   G1StringDedup::initialize();
2132 
2133   return JNI_OK;
2134 }
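
The 6843694 check in initialize() bounds the region count by the width of RegionIdx_t so that remembered-set entries can always name a region. The standalone illustration below shows the arithmetic; the signed 16-bit RegionIdx_t is an assumption made for the example, giving a limit of 32767.

// Standalone illustration of the capacity check; the 16-bit RegionIdx_t here
// is an assumption made for the example, not a statement about HotSpot.
#include <cstdint>
#include <cstdio>

typedef int16_t RegionIdx_t;          // assumed signed 16-bit region index type
static const int BitsPerByte = 8;

int main() {
  const unsigned max_region_idx =
      (1U << (sizeof(RegionIdx_t) * BitsPerByte - 1)) - 1;
  std::printf("max encodable region index: %u\n", max_region_idx);  // 32767
  return 0;
}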


2211 
2212   // STW ref processor
2213   _ref_processor_stw =
2214     new ReferenceProcessor(mr,    // span
2215                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2216                                 // mt processing
2217                            MAX2((int)ParallelGCThreads, 1),
2218                                 // degree of mt processing
2219                            (ParallelGCThreads > 1),
2220                                 // mt discovery
2221                            MAX2((int)ParallelGCThreads, 1),
2222                                 // degree of mt discovery
2223                            true,
2224                                 // Reference discovery is atomic
2225                            &_is_alive_closure_stw);
2226                                 // is alive closure
2227                                 // (for efficiency/performance)
2228 }
2229 
2230 size_t G1CollectedHeap::capacity() const {
2231   return _hrm.length() * HeapRegion::GrainBytes;
2232 }
2233 
2234 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2235   assert(!hr->continuesHumongous(), "pre-condition");
2236   hr->reset_gc_time_stamp();
2237   if (hr->startsHumongous()) {
2238     uint first_index = hr->hrm_index() + 1;
2239     uint last_index = hr->last_hc_index();
2240     for (uint i = first_index; i < last_index; i += 1) {
2241       HeapRegion* chr = region_at(i);
2242       assert(chr->continuesHumongous(), "sanity");
2243       chr->reset_gc_time_stamp();
2244     }
2245   }
2246 }
2247 
2248 #ifndef PRODUCT
2249 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2250 private:
2251   unsigned _gc_time_stamp;
2252   bool _failures;
2253 
2254 public:
2255   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2256     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2257 
2258   virtual bool doHeapRegion(HeapRegion* hr) {


2516           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2517 
2518         // Schedule a standard evacuation pause. We're setting word_size
2519         // to 0 which means that we are not requesting a post-GC allocation.
2520         VM_G1IncCollectionPause op(gc_count_before,
2521                                    0,     /* word_size */
2522                                    false, /* should_initiate_conc_mark */
2523                                    g1_policy()->max_pause_time_ms(),
2524                                    cause);
2525         VMThread::execute(&op);
2526       } else {
2527         // Schedule a Full GC.
2528         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2529         VMThread::execute(&op);
2530       }
2531     }
2532   } while (retry_gc);
2533 }
2534 
2535 bool G1CollectedHeap::is_in(const void* p) const {
2536   if (_hrm.reserved().contains(p)) {
2537     // Given that we know that p is in the reserved space,
2538     // heap_region_containing_raw() should successfully
2539     // return the containing region.
2540     HeapRegion* hr = heap_region_containing_raw(p);
2541     return hr->is_in(p);
2542   } else {
2543     return false;
2544   }
2545 }
2546 
2547 #ifdef ASSERT
2548 bool G1CollectedHeap::is_in_exact(const void* p) const {
2549   bool contains = reserved_region().contains(p);
2550   bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2551   if (contains && available) {
2552     return true;
2553   } else {
2554     return false;
2555   }
2556 }
2557 #endif
2558 
2559 // Iteration functions.
2560 
2561 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2562 
2563 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2564   ExtendedOopClosure* _cl;
2565 public:
2566   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2567   bool doHeapRegion(HeapRegion* r) {
2568     if (!r->continuesHumongous()) {
2569       r->oop_iterate(_cl);
2570     }


2597 }
2598 
2599 // Calls a SpaceClosure on a HeapRegion.
2600 
2601 class SpaceClosureRegionClosure: public HeapRegionClosure {
2602   SpaceClosure* _cl;
2603 public:
2604   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2605   bool doHeapRegion(HeapRegion* r) {
2606     _cl->do_space(r);
2607     return false;
2608   }
2609 };
2610 
2611 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2612   SpaceClosureRegionClosure blk(cl);
2613   heap_region_iterate(&blk);
2614 }
2615 
2616 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2617   _hrm.iterate(cl);
2618 }
2619 
2620 void
2621 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2622                                                  uint worker_id,
2623                                                  uint num_workers,
2624                                                  jint claim_value) const {
2625   _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
2626 }
2627 
2628 class ResetClaimValuesClosure: public HeapRegionClosure {
2629 public:
2630   bool doHeapRegion(HeapRegion* r) {
2631     r->set_claim_value(HeapRegion::InitialClaimValue);
2632     return false;
2633   }
2634 };
2635 
2636 void G1CollectedHeap::reset_heap_region_claim_values() {
2637   ResetClaimValuesClosure blk;
2638   heap_region_iterate(&blk);
2639 }
2640 
2641 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2642   ResetClaimValuesClosure blk;
2643   collection_set_iterate(&blk);
2644 }
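
The iteration entry points above all hand each region to a small closure whose doHeapRegion() return value decides whether to continue (false) or abort (true). A generic, self-contained sketch of that visitor idiom follows, using simplified stand-in types rather than the HotSpot classes.

// Generic sketch (not HotSpot code) of the closure idiom: the heap hands every
// region to the closure, and a false return from do_region() means "keep going".
#include <cstdio>
#include <vector>

struct Region { unsigned index; bool claimed; };

struct RegionClosure {
  virtual bool do_region(Region& r) = 0;   // true aborts the iteration early
  virtual ~RegionClosure() { }
};

struct ResetClaimClosure : public RegionClosure {
  bool do_region(Region& r) { r.claimed = false; return false; }
};

static void region_iterate(std::vector<Region>& heap, RegionClosure* cl) {
  for (Region& r : heap) {
    if (cl->do_region(r)) return;          // closure asked to stop
  }
}

int main() {
  std::vector<Region> heap = { {0, true}, {1, true}, {2, false} };
  ResetClaimClosure cl;
  region_iterate(heap, &cl);
  std::printf("region 0 claimed: %d\n", (int)heap[0].claimed);  // prints 0
  return 0;
}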
2645 


2825   while (cur != NULL) {
2826     HeapRegion* next = cur->next_in_collection_set();
2827     if (cl->doHeapRegion(cur) && false) {
2828       cl->incomplete();
2829       return;
2830     }
2831     cur = next;
2832   }
2833   cur = g1_policy()->collection_set();
2834   while (cur != r) {
2835     HeapRegion* next = cur->next_in_collection_set();
2836     if (cl->doHeapRegion(cur) && false) {
2837       cl->incomplete();
2838       return;
2839     }
2840     cur = next;
2841   }
2842 }
2843 
2844 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2845   HeapRegion* result = _hrm.next_region_in_heap(from);
2846   while (result != NULL && result->isHumongous()) {
2847     result = _hrm.next_region_in_heap(result);
2848   }
2849   return result;
2850 }
2851 
2852 Space* G1CollectedHeap::space_containing(const void* addr) const {
2853   return heap_region_containing(addr);
2854 }
2855 
2856 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2857   Space* sp = space_containing(addr);
2858   return sp->block_start(addr);
2859 }
2860 
2861 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2862   Space* sp = space_containing(addr);
2863   return sp->block_size(addr);
2864 }
2865 
2866 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2867   Space* sp = space_containing(addr);


2887 }
2888 
2889 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2890   // Return the remaining space in the cur alloc region, but not less than
2891   // the min TLAB size.
2892 
2893   // Also, this value can be at most the humongous object threshold,
2894   // since we can't allow tlabs to grow big enough to accommodate
2895   // humongous objects.
2896 
2897   HeapRegion* hr = _mutator_alloc_region.get();
2898   size_t max_tlab = max_tlab_size() * wordSize;
2899   if (hr == NULL) {
2900     return max_tlab;
2901   } else {
2902     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
2903   }
2904 }
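
unsafe_max_tlab_alloc() clamps its answer between MinTLABSize and the humongous-threshold-derived maximum. The tiny worked example below shows the MIN2(MAX2(...)) clamp; the byte values for MinTLABSize, the maximum TLAB size, and the region's free space are assumptions chosen for illustration.

// Worked example of the clamp with assumed byte values; MinTLABSize and the
// humongous-derived maximum below are illustrative numbers only.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t min_tlab_bytes = 2 * 1024;           // assumed MinTLABSize
  const size_t max_tlab_bytes = 512 * 1024;         // assumed max_tlab_size() * wordSize
  const size_t region_free    = 3 * 1024 * 1024;    // free space left in the region

  size_t limit = std::min(std::max(region_free, min_tlab_bytes), max_tlab_bytes);
  std::printf("TLAB size limit: %zu bytes\n", limit);  // clamped down to 524288
  return 0;
}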
2905 
2906 size_t G1CollectedHeap::max_capacity() const {
2907   return _hrm.reserved().byte_size();
2908 }
2909 
2910 jlong G1CollectedHeap::millis_since_last_gc() {
2911   // assert(false, "NYI");
2912   return 0;
2913 }
2914 
2915 void G1CollectedHeap::prepare_for_verify() {
2916   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2917     ensure_parsability(false);
2918   }
2919   g1_rem_set()->prepare_for_verify();
2920 }
2921 
2922 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2923                                               VerifyOption vo) {
2924   switch (vo) {
2925   case VerifyOption_G1UsePrevMarking:
2926     return hr->obj_allocated_since_prev_marking(obj);
2927   case VerifyOption_G1UseNextMarking:


3416   }
3417   return false; // keep some compilers happy
3418 }
3419 
3420 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3421                                        const VerifyOption vo) const {
3422   switch (vo) {
3423   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3424   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3425   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
3426   default:                            ShouldNotReachHere();
3427   }
3428   return false; // keep some compilers happy
3429 }
3430 
3431 void G1CollectedHeap::print_on(outputStream* st) const {
3432   st->print(" %-20s", "garbage-first heap");
3433   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3434             capacity()/K, used_unlocked()/K);
3435   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3436             _hrm.reserved().start(),
3437             _hrm.reserved().start() + _hrm.length() * HeapRegion::GrainWords,
3438             _hrm.reserved().end());
3439   st->cr();
3440   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3441   uint young_regions = _young_list->length();
3442   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3443             (size_t) young_regions * HeapRegion::GrainBytes / K);
3444   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3445   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3446             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3447   st->cr();
3448   MetaspaceAux::print_on(st);
3449 }
3450 
3451 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3452   print_on(st);
3453 
3454   // Print the per-region information.
3455   st->cr();
3456   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3457                "HS=humongous(starts), HC=humongous(continues), "
3458                "CS=collection set, F=free, TS=gc time stamp, "


3661 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3662   HeapRegion* region = region_at(index);
3663   assert(region->startsHumongous(), "Must start a humongous object");
3664   return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3665 }
3666 
3667 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3668  private:
3669   size_t _total_humongous;
3670   size_t _candidate_humongous;
3671  public:
3672   RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
3673   }
3674 
3675   virtual bool doHeapRegion(HeapRegion* r) {
3676     if (!r->startsHumongous()) {
3677       return false;
3678     }
3679     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3680 
3681     uint region_idx = r->hrm_index();
3682     bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
3683     // is_candidate already filters out humongous regions with some remembered set.
3684     // This will not lead to a humongous object that we mistakenly keep alive because
3685     // during young collection the remembered sets will only be added to.
3686     if (is_candidate) {
3687       g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3688       _candidate_humongous++;
3689     }
3690     _total_humongous++;
3691 
3692     return false;
3693   }
3694 
3695   size_t total_humongous() const { return _total_humongous; }
3696   size_t candidate_humongous() const { return _candidate_humongous; }
3697 };
3698 
3699 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3700   if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
3701     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);


4183       // that all the COMMIT events are generated before the end GC
4184       // event, and after we retire the GC alloc regions so that all
4185       // RETIRE events are generated before the end GC event.
4186       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4187 
4188 #ifdef TRACESPINNING
4189       ParallelTaskTerminator::print_termination_counts();
4190 #endif
4191 
4192       gc_epilogue(false);
4193     }
4194 
4195     // Print the remainder of the GC log output.
4196     log_gc_footer(os::elapsedTime() - pause_start_sec);
4197 
4198     // It is not yet safe to tell the concurrent mark to
4199     // start as we have some optional output below. We don't want the
4200     // output from the concurrent mark thread interfering with this
4201     // logging output either.
4202 
4203     _hrm.verify_optional();
4204     verify_region_sets_optional();
4205 
4206     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4207     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4208 
4209     print_heap_after_gc();
4210     trace_heap_after_gc(_gc_tracer_stw);
4211 
4212     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4213     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4214     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4215     // before any GC notifications are raised.
4216     g1mm()->update_sizes();
4217 
4218     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4219     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4220     _gc_timer_stw->register_gc_end();
4221     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4222   }
4223   // It should now be safe to tell the concurrent mark thread to start


6002   // reference processor's discovered lists. We need to do
6003   // this after the card table is cleaned (and verified) as
6004   // the act of enqueueing entries on to the pending list
6005   // will log these updates (and dirty their associated
6006   // cards). We need these updates logged to update any
6007   // RSets.
6008   enqueue_discovered_references(n_workers);
6009 
6010   if (G1DeferredRSUpdate) {
6011     redirty_logged_cards();
6012   }
6013   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
6014 }
6015 
6016 void G1CollectedHeap::free_region(HeapRegion* hr,
6017                                   FreeRegionList* free_list,
6018                                   bool par,
6019                                   bool locked) {
6020   assert(!hr->isHumongous(), "this is only for non-humongous regions");
6021   assert(!hr->is_empty(), "the region should not be empty");
6022   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
6023   assert(free_list != NULL, "pre-condition");
6024 
6025   if (G1VerifyBitmaps) {
6026     MemRegion mr(hr->bottom(), hr->end());
6027     concurrent_mark()->clearRangePrevBitmap(mr);
6028   }
6029 
6030   // Clear the card counts for this region.
6031   // Note: we only need to do this if the region is not young
6032   // (since we don't refine cards in young regions).
6033   if (!hr->is_young()) {
6034     _cg1r->hot_card_cache()->reset_card_counts(hr);
6035   }
6036   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
6037   free_list->add_ordered(hr);
6038 }
6039 
6040 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
6041                                      FreeRegionList* free_list,
6042                                      bool par) {
6043   assert(hr->startsHumongous(), "this is only for starts humongous regions");
6044   assert(free_list != NULL, "pre-condition");
6045 
6046   size_t hr_capacity = hr->capacity();
6047   // We need to read this before we make the region non-humongous,
6048   // otherwise the information will be gone.
6049   uint last_index = hr->last_hc_index();
6050   hr->set_notHumongous();
6051   free_region(hr, free_list, par);
6052 
6053   uint i = hr->hrm_index() + 1;
6054   while (i < last_index) {
6055     HeapRegion* curr_hr = region_at(i);
6056     assert(curr_hr->continuesHumongous(), "invariant");
6057     curr_hr->set_notHumongous();
6058     free_region(curr_hr, free_list, par);
6059     i += 1;
6060   }
6061 }
6062 
6063 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
6064                                        const HeapRegionSetCount& humongous_regions_removed) {
6065   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
6066     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
6067     _old_set.bulk_remove(old_regions_removed);
6068     _humongous_set.bulk_remove(humongous_regions_removed);
6069   }
6070 
6071 }
6072 
6073 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
6074   assert(list != NULL, "list can't be null");
6075   if (!list->is_empty()) {
6076     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
6077     _hrm.insert_list_into_free_list(list);
6078   }
6079 }
6080 
6081 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
6082   assert(_summary_bytes_used >= bytes,
6083          err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
6084                   _summary_bytes_used, bytes));
6085   _summary_bytes_used -= bytes;
6086 }
6087 
6088 class G1ParCleanupCTTask : public AbstractGangTask {
6089   G1SATBCardTableModRefBS* _ct_bs;
6090   G1CollectedHeap* _g1h;
6091   HeapRegion* volatile _su_head;
6092 public:
6093   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
6094                      G1CollectedHeap* g1h) :
6095     AbstractGangTask("G1 Par Cleanup CT Task"),
6096     _ct_bs(ct_bs), _g1h(g1h) { }
6097 


6426     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6427     // until the end of a concurrent mark.
6428     //
6429     // It is not required to check whether the object has been found dead by marking
6430     // or not, in fact it would prevent reclamation within a concurrent cycle, as
6431     // all objects allocated during that time are considered live.
6432     // SATB marking is even more conservative than the remembered set.
6433     // So if at this point in the collection there is no remembered set entry,
6434     // nobody has a reference to it.
6435     // At the start of collection we flush all refinement logs, and remembered sets
6436     // are completely up-to-date wrt references to the humongous object.
6437     //
6438     // Other implementation considerations:
6439     // - never consider object arrays: while they are a valid target, they have not
6440     // been observed to be used as temporary objects.
6441     // - they would also pose considerable effort for cleaning up the remembered
6442     // sets.
6443     // While this cleanup is not strictly necessary to be done (or done instantly),
6444     // given that their occurrence is very low, this saves us this additional
6445     // complexity.
6446     uint region_idx = r->hrm_index();
6447     if (g1h->humongous_is_live(region_idx) ||
6448         g1h->humongous_region_is_always_live(region_idx)) {
6449 
6450       if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6451         gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6452                                r->isHumongous(),
6453                                region_idx,
6454                                r->rem_set()->occupied(),
6455                                r->rem_set()->strong_code_roots_list_length(),
6456                                next_bitmap->isMarked(r->bottom()),
6457                                g1h->humongous_is_live(region_idx),
6458                                obj->is_objArray()
6459                               );
6460       }
6461 
6462       return false;
6463     }
6464 
6465     guarantee(!obj->is_objArray(),
6466               err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",


6665     return false;
6666   }
6667 
6668   ~TearDownRegionSetsClosure() {
6669     assert(_old_set->is_empty(), "post-condition");
6670   }
6671 };
6672 
6673 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6674   assert_at_safepoint(true /* should_be_vm_thread */);
6675 
6676   if (!free_list_only) {
6677     TearDownRegionSetsClosure cl(&_old_set);
6678     heap_region_iterate(&cl);
6679 
6680     // Note that emptying the _young_list is postponed and instead done as
6681     // the first step when rebuilding the region sets again. The reason for
6682     // this is that during a full GC string deduplication needs to know if
6683     // a collected region was young or old when the full GC was initiated.
6684   }
6685   _hrm.remove_all_free_regions();
6686 }
6687 
6688 class RebuildRegionSetsClosure : public HeapRegionClosure {
6689 private:
6690   bool            _free_list_only;
6691   HeapRegionSet*   _old_set;
6692   HeapRegionManager*   _hrm;
6693   size_t          _total_used;
6694 
6695 public:
6696   RebuildRegionSetsClosure(bool free_list_only,
6697                            HeapRegionSet* old_set, HeapRegionManager* hrm) :
6698     _free_list_only(free_list_only),
6699     _old_set(old_set), _hrm(hrm), _total_used(0) {
6700     assert(_hrm->num_free_regions() == 0, "pre-condition");
6701     if (!free_list_only) {
6702       assert(_old_set->is_empty(), "pre-condition");
6703     }
6704   }
6705 
6706   bool doHeapRegion(HeapRegion* r) {
6707     if (r->continuesHumongous()) {
6708       return false;
6709     }
6710 
6711     if (r->is_empty()) {
6712       // Add free regions to the free list
6713       _hrm->insert_into_free_list(r);
6714     } else if (!_free_list_only) {
6715       assert(!r->is_young(), "we should not come across young regions");
6716 
6717       if (r->isHumongous()) {
6718         // We ignore humongous regions; we left the humongous set unchanged
6719       } else {
6720         // The rest should be old, add them to the old set
6721         _old_set->add(r);
6722       }
6723       _total_used += r->used();
6724     }
6725 
6726     return false;
6727   }
6728 
6729   size_t total_used() {
6730     return _total_used;
6731   }
6732 };
6733 
6734 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6735   assert_at_safepoint(true /* should_be_vm_thread */);
6736 
6737   if (!free_list_only) {
6738     _young_list->empty_list();
6739   }
6740 
6741   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6742   heap_region_iterate(&cl);
6743 
6744   if (!free_list_only) {
6745     _summary_bytes_used = cl.total_used();
6746   }
6747   assert(_summary_bytes_used == recalculate_used(),
6748          err_msg("inconsistent _summary_bytes_used, "
6749                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6750                  _summary_bytes_used, recalculate_used()));
6751 }
6752 
6753 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6754   _refine_cte_cl->set_concurrent(concurrent);
6755 }
6756 
6757 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6758   HeapRegion* hr = heap_region_containing(p);
6759   return hr->is_in(p);
6760 }
6761 


6911                                MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
6912 
6913       // Skip allocation if there is not enough space to allocate even the smallest
6914       // possible object. In this case this region will not be retained, so the
6915       // original problem cannot occur.
6916       if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
6917         HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
6918         CollectedHeap::fill_with_object(dummy, to_allocate_words);
6919       }
6920     }
6921   }
6922   return G1AllocRegion::release();
6923 }
6924 
6925 // Heap region set verification
6926 
6927 class VerifyRegionListsClosure : public HeapRegionClosure {
6928 private:
6929   HeapRegionSet*   _old_set;
6930   HeapRegionSet*   _humongous_set;
6931   HeapRegionManager*   _hrm;
6932 
6933 public:
6934   HeapRegionSetCount _old_count;
6935   HeapRegionSetCount _humongous_count;
6936   HeapRegionSetCount _free_count;
6937 
6938   VerifyRegionListsClosure(HeapRegionSet* old_set,
6939                            HeapRegionSet* humongous_set,
6940                            HeapRegionManager* hrm) :
6941     _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6942     _old_count(), _humongous_count(), _free_count(){ }
6943 
6944   bool doHeapRegion(HeapRegion* hr) {
6945     if (hr->continuesHumongous()) {
6946       return false;
6947     }
6948 
6949     if (hr->is_young()) {
6950       // TODO
6951     } else if (hr->startsHumongous()) {
6952       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
6953       _humongous_count.increment(1u, hr->capacity());
6954     } else if (hr->is_empty()) {
6955       assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
6956       _free_count.increment(1u, hr->capacity());
6957     } else {
6958       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
6959       _old_count.increment(1u, hr->capacity());
6960     }
6961     return false;
6962   }
6963 
6964   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6965     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6966     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6967         old_set->total_capacity_bytes(), _old_count.capacity()));
6968 
6969     guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6970     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6971         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6972 
6973     guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6974     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6975         free_list->total_capacity_bytes(), _free_count.capacity()));
6976   }
6977 };
6978 
6979 void G1CollectedHeap::verify_region_sets() {
6980   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6981 
6982   // First, check the explicit lists.
6983   _hrm.verify();
6984   {
6985     // Given that a concurrent operation might be adding regions to
6986     // the secondary free list we have to take the lock before
6987     // verifying it.
6988     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6989     _secondary_free_list.verify_list();
6990   }
6991 
6992   // If a concurrent region freeing operation is in progress it will
6993   // be difficult to correctly attribute any free regions we come
6994   // across to the correct free list given that they might belong to
6995   // one of several (free_list, secondary_free_list, any local lists,
6996   // etc.). So, if that's the case we will skip the rest of the
6997   // verification operation. Alternatively, waiting for the concurrent
6998   // operation to complete will have a non-trivial effect on the GC's
6999   // operation (no concurrent operation will last longer than the
7000   // interval between two calls to verification) and it might hide
7001   // any issues that we would like to catch during testing.
7002   if (free_regions_coming()) {
7003     return;
7004   }
7005 
7006   // Make sure we append the secondary_free_list on the free_list so
7007   // that all free regions we will come across can be safely
7008   // attributed to the free_list.
7009   append_secondary_free_list_if_not_empty_with_lock();
7010 
7011   // Finally, make sure that the region accounting in the lists is
7012   // consistent with what we see in the heap.
7013 
7014   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
7015   heap_region_iterate(&cl);
7016   cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
7017 }
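
verify_region_sets() walks the heap with VerifyRegionListsClosure, tallies what each region actually is, and then requires those tallies to match the counters kept by the old set, the humongous set, and the free list. The sketch below shows that cross-check in a self-contained form; the types and numbers are simplified stand-ins, not the HotSpot classes.

// Self-contained sketch of the cross-check; all types and numbers below are
// simplified stand-ins, not the HotSpot classes.
#include <cassert>
#include <cstddef>
#include <vector>

enum Kind { FreeRegion, OldRegion, HumongousRegion };
struct Region   { Kind kind; size_t capacity; };
struct SetCount { unsigned length; size_t capacity; };

int main() {
  std::vector<Region> heap = { { OldRegion,       1u << 20 },
                               { FreeRegion,      1u << 20 },
                               { HumongousRegion, 1u << 20 } };

  // Counters the region sets claim to hold (assumed consistent for the sketch).
  SetCount old_set  = { 1, 1u << 20 };
  SetCount hum_set  = { 1, 1u << 20 };
  SetCount free_set = { 1, 1u << 20 };

  // What a fresh walk over the heap actually observes.
  SetCount old_seen = { 0, 0 }, hum_seen = { 0, 0 }, free_seen = { 0, 0 };
  for (const Region& r : heap) {
    SetCount& c = (r.kind == OldRegion) ? old_seen
                : (r.kind == HumongousRegion) ? hum_seen : free_seen;
    c.length   += 1;
    c.capacity += r.capacity;
  }

  // verify_counts(): every length and capacity must agree, or verification fails.
  assert(old_seen.length  == old_set.length  && old_seen.capacity  == old_set.capacity);
  assert(hum_seen.length  == hum_set.length  && hum_seen.capacity  == hum_set.capacity);
  assert(free_seen.length == free_set.length && free_seen.capacity == free_set.capacity);
  return 0;
}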
7018 
7019 // Optimized nmethod scanning
7020 
7021 class RegisterNMethodOopClosure: public OopClosure {
7022   G1CollectedHeap* _g1h;
7023   nmethod* _nm;
7024 
7025   template <class T> void do_oop_work(T* p) {
7026     T heap_oop = oopDesc::load_heap_oop(p);
7027     if (!oopDesc::is_null(heap_oop)) {
7028       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
7029       HeapRegion* hr = _g1h->heap_region_containing(obj);
7030       assert(!hr->continuesHumongous(),
7031              err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
7032                      " starting at "HR_FORMAT,
7033                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
7034 
7035       // HeapRegion::add_strong_code_root() avoids adding duplicate
7036       // entries but having duplicates is OK since we "mark" nmethods