1008 _ref_processor_cm->verify_no_references_recorded();
1009
1010 // Abandon current iterations of concurrent marking and concurrent
1011 // refinement, if any are in progress.
1012 concurrent_mark()->concurrent_cycle_abort();
1013 }
1014
1015 void G1CollectedHeap::prepare_heap_for_full_collection() {
     // Detach the allocation machinery and collection-set state from the
     // current heap layout so the full GC can freely move regions between
     // sets; everything torn down here is rebuilt after the collection
     // (see rebuild_region_sets() in prepare_heap_for_mutators()).
1016 // Make sure we'll choose a new allocation region afterwards.
1017 _allocator->release_mutator_alloc_region();
1018 _allocator->abandon_gc_alloc_regions();
     // Clear per-region remembered-set scratch state (HRRS) before the
     // full collection rewrites region contents.
1019 g1_rem_set()->cleanupHRRS();
1020
1021 // We may have added regions to the current incremental collection
1022 // set between the last GC or pause and now. We need to clear the
1023 // incremental collection set and then start rebuilding it afresh
1024 // after this full GC.
1025 abandon_collection_set(collection_set());
1026
     // free_list_only == false: tear down all region sets, not just the
     // free list.
1027 tear_down_region_sets(false /* free_list_only */);
1028 }
1029
1030 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
     // Pre-full-GC sanity checks: the GC cause must be consistent with the
     // explicit_gc flag, the cached used() accounting must agree with a
     // full recalculation, and the heap verifier runs its (optional)
     // region-set check plus the full-GC heap and bitmap verification.
1031 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1032 assert(used() == recalculate_used(), "Should be equal");
1033 _verifier->verify_region_sets_optional();
1034 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1035 _verifier->check_bitmaps("Full GC Start");
1036 }
1037
1038 void G1CollectedHeap::prepare_heap_for_mutators() {
1039 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1040 ClassLoaderDataGraph::purge();
1041 MetaspaceUtils::verify_metrics();
1042
1043 // Prepare heap for normal collections.
1044 assert(num_free_regions() == 0, "we should not have added any free regions");
1045 rebuild_region_sets(false /* free_list_only */);
1046 abort_refinement();
1047 resize_heap_if_necessary();
1048
1049 // Rebuild the strong code root lists for each region
1050 rebuild_strong_code_roots();
1051
1052 // Purge code root memory
1053 purge_code_root_memory();
1054
1055 // Start a new incremental collection set for the next pause
1056 start_new_collection_set();
1057
1058 _allocator->init_mutator_alloc_region();
1469 "master humongous set MT safety protocol outside a safepoint");
1470 }
1471 }
1472 bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1473 const char* get_description() { return "Humongous Regions"; }
1474 };
1475
1476 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1477 CollectedHeap(),
1478 _young_gen_sampling_thread(NULL),
1479 _workers(NULL),
1480 _collector_policy(collector_policy),
1481 _card_table(NULL),
1482 _soft_ref_policy(),
1483 _old_set("Old Region Set", new OldRegionSetChecker()),
1484 _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1485 _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1486 _bot(NULL),
1487 _listener(),
1488 _hrm(NULL),
1489 _is_hetero_heap(false),
1490 _allocator(NULL),
1491 _verifier(NULL),
1492 _summary_bytes_used(0),
1493 _archive_allocator(NULL),
1494 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1495 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1496 _expand_heap_after_alloc_failure(true),
1497 _g1mm(NULL),
1498 _humongous_reclaim_candidates(),
1499 _has_humongous_reclaim_candidates(false),
1500 _hr_printer(),
1501 _collector_state(),
1502 _old_marking_cycles_started(0),
1503 _old_marking_cycles_completed(0),
1504 _eden(),
1505 _survivor(),
1506 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1507 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1508 _g1_policy(new G1Policy(_gc_timer_stw)),
1509 _heap_sizing_policy(NULL),
1602 return JNI_ENOMEM;
1603 }
1604 return JNI_OK;
1605 }
1606
1607 jint G1CollectedHeap::initialize() {
1608 os::enable_vtime();
1609
1610 // Necessary to satisfy locking discipline assertions.
1611
1612 MutexLocker x(Heap_lock);
1613
1614 // While there are no constraints in the GC code that HeapWordSize
1615 // be any particular value, there are multiple other areas in the
1616 // system which believe this to be true (e.g. oop->object_size in some
1617 // cases incorrectly returns the size in wordSize units rather than
1618 // HeapWordSize).
1619 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1620
1621 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1622 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1623 size_t heap_alignment = collector_policy()->heap_alignment();
1624
1625 // Ensure that the sizes are properly aligned.
1626 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1627 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1628 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1629
1630 // Reserve the maximum.
1631
1632 // When compressed oops are enabled, the preferred heap base
1633 // is calculated by subtracting the requested size from the
1634 // 32Gb boundary and using the result as the base address for
1635 // heap reservation. If the requested size is not aligned to
1636 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1637 // into the ReservedHeapSpace constructor) then the actual
1638 // base of the reserved heap may end up differing from the
1639 // address that was requested (i.e. the preferred heap base).
1640 // If this happens then we could end up using a non-optimal
1641 // compressed oops mode.
1642 if (AllocateOldGenAt != NULL) {
1643 _is_hetero_heap = true;
1644 max_byte_size *= 2;
1645 }
1646
1647 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1648 heap_alignment);
1649
1650 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1651
1652 // Create the barrier set for the entire reserved region.
1653 G1CardTable* ct = new G1CardTable(reserved_region());
1654 ct->initialize();
1655 G1BarrierSet* bs = new G1BarrierSet(ct);
1656 bs->initialize();
1657 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1658 BarrierSet::set_barrier_set(bs);
1659 _card_table = ct;
1660
1661 G1BarrierSet::satb_mark_queue_set().initialize(this,
1662 SATB_Q_CBL_mon,
1663 &bs->satb_mark_queue_buffer_allocator(),
1664 G1SATBProcessCompletedThreshold,
1665 G1SATBBufferEnqueueingThresholdPercent,
1676
1677 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1678 &bs->dirty_card_queue_buffer_allocator(),
1679 -1, // never trigger processing
1680 -1, // no limit on length
1681 Shared_DirtyCardQ_lock);
1682
1683 // Create the hot card cache.
1684 _hot_card_cache = new G1HotCardCache(this);
1685
1686 // Carve out the G1 part of the heap.
1687 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1688 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1689 G1RegionToSpaceMapper* heap_storage =
1690 G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
1691 g1_rs.size(),
1692 page_size,
1693 HeapRegion::GrainBytes,
1694 1,
1695 mtJavaHeap);
1696 os::trace_page_sizes("Heap",
1697 collector_policy()->min_heap_byte_size(),
1698 max_byte_size,
1699 page_size,
1700 heap_rs.base(),
1701 heap_rs.size());
1702 heap_storage->set_mapping_changed_listener(&_listener);
1703
1704 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1705 G1RegionToSpaceMapper* bot_storage =
1706 create_aux_memory_mapper("Block Offset Table",
1707 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1708 G1BlockOffsetTable::heap_map_factor());
1709
1710 G1RegionToSpaceMapper* cardtable_storage =
1711 create_aux_memory_mapper("Card Table",
1712 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1713 G1CardTable::heap_map_factor());
1714
1715 G1RegionToSpaceMapper* card_counts_storage =
1716 create_aux_memory_mapper("Card Counts Table",
1717 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1718 G1CardCounts::heap_map_factor());
1719
1720 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1721 G1RegionToSpaceMapper* prev_bitmap_storage =
1722 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1723 G1RegionToSpaceMapper* next_bitmap_storage =
1724 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1725
1726 if (is_hetero_heap()) {
1727 _hrm = new HeapRegionManagerForHeteroHeap((uint)((max_byte_size / 2) / HeapRegion::GrainBytes /*heap size as num of regions*/));
1728 }
1729 else {
1730 _hrm = new HeapRegionManager();
1731 }
1732 _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1733 _card_table->initialize(cardtable_storage);
1734 // Do later initialization work for concurrent refinement.
1735 _hot_card_cache->initialize(card_counts_storage);
1736
1737 // 6843694 - ensure that the maximum region index can fit
1738 // in the remembered set structures.
1739 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1740 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1741
1742 // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1743 // start within the first card.
1744 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1745 // Also create a G1 rem set.
1746 _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1747 _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
1748
1749 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1750 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1751 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1765 }
1766
1767 // Create the G1ConcurrentMark data structure and thread.
1768 // (Must do this late, so that "max_regions" is defined.)
1769 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1770 if (_cm == NULL || !_cm->completed_initialization()) {
1771 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1772 return JNI_ENOMEM;
1773 }
1774 _cm_thread = _cm->cm_thread();
1775
1776 // Now expand into the initial heap size.
1777 if (!expand(init_byte_size, _workers)) {
1778 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1779 return JNI_ENOMEM;
1780 }
1781
1782 // Perform any initialization actions delegated to the policy.
1783 g1_policy()->init(this, &_collection_set);
1784
1785 jint ecode = initialize_concurrent_refinement();
1786 if (ecode != JNI_OK) {
1787 return ecode;
1788 }
1789
1790 ecode = initialize_young_gen_sampling_thread();
1791 if (ecode != JNI_OK) {
1792 return ecode;
1793 }
1794
1795 {
1796 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1797 dcqs.set_process_completed_threshold((int)concurrent_refine()->yellow_zone());
1798 dcqs.set_max_completed_queue((int)concurrent_refine()->red_zone());
1799 }
1800
1801 // Here we allocate the dummy HeapRegion that is required by the
1802 // G1AllocRegion class.
1803 HeapRegion* dummy_region = _hrm->get_dummy_region();
1804
1903 false, // Reference discovery is not atomic
1904 &_is_alive_closure_cm, // is alive closure
1905 true); // allow changes to number of processing threads
1906
1907 // STW ref processor
1908 _ref_processor_stw =
1909 new ReferenceProcessor(&_is_subject_to_discovery_stw,
1910 mt_processing, // mt processing
1911 ParallelGCThreads, // degree of mt processing
1912 (ParallelGCThreads > 1), // mt discovery
1913 ParallelGCThreads, // degree of mt discovery
1914 true, // Reference discovery is atomic
1915 &_is_alive_closure_stw, // is alive closure
1916 true); // allow changes to number of processing threads
1917 }
1918
1919 CollectorPolicy* G1CollectedHeap::collector_policy() const {
     // Return the collector policy supplied at construction.
1920 return _collector_policy;
1921 }
1922
1923 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
     // Expose the embedded (by-value) soft reference policy member.
1924 return &_soft_ref_policy;
1925 }
1926
1927 size_t G1CollectedHeap::capacity() const {
     // Heap capacity: number of regions tracked by the region manager
     // times the fixed per-region size (GrainBytes).
1928 return _hrm->length() * HeapRegion::GrainBytes;
1929 }
1930
1931 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
     // Committed-but-unused space, per the region manager's free-bytes
     // accounting.
1932 return _hrm->total_free_bytes();
1933 }
1934
1935 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
     // Drain the hot card cache, handing cl to G1HotCardCache::drain on
     // behalf of worker worker_i.
1936 _hot_card_cache->drain(cl, worker_i);
1937 }
1938
1939 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1940 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1941 size_t n_completed_buffers = 0;
1942 while (dcqs.apply_closure_during_gc(cl, worker_i)) {
2502 // This summary needs to be printed before incrementing total collections.
2503 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2504
2505 // Update common counters.
2506 increment_total_collections(full /* full gc */);
2507 if (full) {
2508 increment_old_marking_cycles_started();
2509 }
2510
2511 // Fill TLAB's and such
2512 double start = os::elapsedTime();
2513 ensure_parsability(true);
2514 g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2515 }
2516
2517 void G1CollectedHeap::gc_epilogue(bool full) {
2518 // Update common counters.
2519 if (full) {
2520 // Update the number of full collections that have been completed.
2521 increment_old_marking_cycles_completed(false /* concurrent */);
2522 }
2523
2524 // We are at the end of the GC. Total collections has already been increased.
2525 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2526
2527 // FIXME: what is this about?
2528 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2529 // is set.
2530 #if COMPILER2_OR_JVMCI
2531 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2532 #endif
2533 // always_do_update_barrier = true;
2534
2535 double start = os::elapsedTime();
2536 resize_all_tlabs();
2537 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2538
2539 MemoryService::track_memory_usage();
2540 // We have just completed a GC. Update the soft reference
2541 // policy with the new heap occupancy
|
1008 _ref_processor_cm->verify_no_references_recorded();
1009
1010 // Abandon current iterations of concurrent marking and concurrent
1011 // refinement, if any are in progress.
1012 concurrent_mark()->concurrent_cycle_abort();
1013 }
1014
1015 void G1CollectedHeap::prepare_heap_for_full_collection() {
     // Detach the allocation machinery and collection-set state from the
     // current heap layout so the full GC can freely move regions between
     // sets; everything torn down here is rebuilt after the collection
     // (see prepare_heap_for_mutators()).
1016 // Make sure we'll choose a new allocation region afterwards.
1017 _allocator->release_mutator_alloc_region();
1018 _allocator->abandon_gc_alloc_regions();
     // Clear per-region remembered-set scratch state (HRRS) before the
     // full collection rewrites region contents.
1019 g1_rem_set()->cleanupHRRS();
1020
1021 // We may have added regions to the current incremental collection
1022 // set between the last GC or pause and now. We need to clear the
1023 // incremental collection set and then start rebuilding it afresh
1024 // after this full GC.
1025 abandon_collection_set(collection_set());
1026
     // free_list_only == false: tear down all region sets, not just the
     // free list.
1027 tear_down_region_sets(false /* free_list_only */);
1028
     // Notify the region manager that a full collection is starting; the
     // matching prepare_for_full_collection_end() hook is invoked from
     // prepare_heap_for_mutators().
1029 hrm()->prepare_for_full_collection_start();
1030 }
1031
1032 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
     // Pre-full-GC sanity checks: the GC cause must be consistent with the
     // explicit_gc flag, the cached used() accounting must agree with a
     // full recalculation, and the heap verifier runs its (optional)
     // region-set check plus the full-GC heap and bitmap verification.
1033 assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1034 assert(used() == recalculate_used(), "Should be equal");
1035 _verifier->verify_region_sets_optional();
1036 _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
1037 _verifier->check_bitmaps("Full GC Start");
1038 }
1039
1040 void G1CollectedHeap::prepare_heap_for_mutators() {
1041 hrm()->prepare_for_full_collection_end();
1042
1043 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1044 ClassLoaderDataGraph::purge();
1045 MetaspaceUtils::verify_metrics();
1046
1047 // Prepare heap for normal collections.
1048 assert(num_free_regions() == 0, "we should not have added any free regions");
1049 rebuild_region_sets(false /* free_list_only */);
1050 abort_refinement();
1051 resize_heap_if_necessary();
1052
1053 // Rebuild the strong code root lists for each region
1054 rebuild_strong_code_roots();
1055
1056 // Purge code root memory
1057 purge_code_root_memory();
1058
1059 // Start a new incremental collection set for the next pause
1060 start_new_collection_set();
1061
1062 _allocator->init_mutator_alloc_region();
1473 "master humongous set MT safety protocol outside a safepoint");
1474 }
1475 }
1476 bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1477 const char* get_description() { return "Humongous Regions"; }
1478 };
1479
1480 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1481 CollectedHeap(),
1482 _young_gen_sampling_thread(NULL),
1483 _workers(NULL),
1484 _collector_policy(collector_policy),
1485 _card_table(NULL),
1486 _soft_ref_policy(),
1487 _old_set("Old Region Set", new OldRegionSetChecker()),
1488 _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1489 _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1490 _bot(NULL),
1491 _listener(),
1492 _hrm(NULL),
1493 _is_hetero_heap(AllocateOldGenAt != NULL),
1494 _allocator(NULL),
1495 _verifier(NULL),
1496 _summary_bytes_used(0),
1497 _archive_allocator(NULL),
1498 _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1499 _old_evac_stats("Old", OldPLABSize, PLABWeight),
1500 _expand_heap_after_alloc_failure(true),
1501 _g1mm(NULL),
1502 _humongous_reclaim_candidates(),
1503 _has_humongous_reclaim_candidates(false),
1504 _hr_printer(),
1505 _collector_state(),
1506 _old_marking_cycles_started(0),
1507 _old_marking_cycles_completed(0),
1508 _eden(),
1509 _survivor(),
1510 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1511 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1512 _g1_policy(new G1Policy(_gc_timer_stw)),
1513 _heap_sizing_policy(NULL),
1606 return JNI_ENOMEM;
1607 }
1608 return JNI_OK;
1609 }
1610
1611 jint G1CollectedHeap::initialize() {
1612 os::enable_vtime();
1613
1614 // Necessary to satisfy locking discipline assertions.
1615
1616 MutexLocker x(Heap_lock);
1617
1618 // While there are no constraints in the GC code that HeapWordSize
1619 // be any particular value, there are multiple other areas in the
1620 // system which believe this to be true (e.g. oop->object_size in some
1621 // cases incorrectly returns the size in wordSize units rather than
1622 // HeapWordSize).
1623 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1624
1625 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1626 size_t max_byte_size = g1_collector_policy()->heap_reservation_size_bytes();
1627 size_t heap_alignment = collector_policy()->heap_alignment();
1628
1629 // Ensure that the sizes are properly aligned.
1630 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1631 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1632 Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1633
1634 // Reserve the maximum.
1635
1636 // When compressed oops are enabled, the preferred heap base
1637 // is calculated by subtracting the requested size from the
1638 // 32Gb boundary and using the result as the base address for
1639 // heap reservation. If the requested size is not aligned to
1640 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1641 // into the ReservedHeapSpace constructor) then the actual
1642 // base of the reserved heap may end up differing from the
1643 // address that was requested (i.e. the preferred heap base).
1644 // If this happens then we could end up using a non-optimal
1645 // compressed oops mode.
1646
1647 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1648 heap_alignment);
1649
1650 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1651
1652 // Create the barrier set for the entire reserved region.
1653 G1CardTable* ct = new G1CardTable(reserved_region());
1654 ct->initialize();
1655 G1BarrierSet* bs = new G1BarrierSet(ct);
1656 bs->initialize();
1657 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1658 BarrierSet::set_barrier_set(bs);
1659 _card_table = ct;
1660
1661 G1BarrierSet::satb_mark_queue_set().initialize(this,
1662 SATB_Q_CBL_mon,
1663 &bs->satb_mark_queue_buffer_allocator(),
1664 G1SATBProcessCompletedThreshold,
1665 G1SATBBufferEnqueueingThresholdPercent,
1676
1677 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1678 &bs->dirty_card_queue_buffer_allocator(),
1679 -1, // never trigger processing
1680 -1, // no limit on length
1681 Shared_DirtyCardQ_lock);
1682
1683 // Create the hot card cache.
1684 _hot_card_cache = new G1HotCardCache(this);
1685
1686 // Carve out the G1 part of the heap.
1687 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1688 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1689 G1RegionToSpaceMapper* heap_storage =
1690 G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
1691 g1_rs.size(),
1692 page_size,
1693 HeapRegion::GrainBytes,
1694 1,
1695 mtJavaHeap);
1696 if(heap_storage == NULL) {
1697 vm_shutdown_during_initialization("Could not initialize G1 heap");
1698 return JNI_ERR;
1699 }
1700
1701 os::trace_page_sizes("Heap",
1702 collector_policy()->min_heap_byte_size(),
1703 max_byte_size,
1704 page_size,
1705 heap_rs.base(),
1706 heap_rs.size());
1707 heap_storage->set_mapping_changed_listener(&_listener);
1708
1709 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1710 G1RegionToSpaceMapper* bot_storage =
1711 create_aux_memory_mapper("Block Offset Table",
1712 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1713 G1BlockOffsetTable::heap_map_factor());
1714
1715 G1RegionToSpaceMapper* cardtable_storage =
1716 create_aux_memory_mapper("Card Table",
1717 G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
1718 G1CardTable::heap_map_factor());
1719
1720 G1RegionToSpaceMapper* card_counts_storage =
1721 create_aux_memory_mapper("Card Counts Table",
1722 G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1723 G1CardCounts::heap_map_factor());
1724
1725 size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1726 G1RegionToSpaceMapper* prev_bitmap_storage =
1727 create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1728 G1RegionToSpaceMapper* next_bitmap_storage =
1729 create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1730
1731 _hrm = HeapRegionManager::create_manager(this, collector_policy());
1732
1733 _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1734 _card_table->initialize(cardtable_storage);
1735 // Do later initialization work for concurrent refinement.
1736 _hot_card_cache->initialize(card_counts_storage);
1737
1738 // 6843694 - ensure that the maximum region index can fit
1739 // in the remembered set structures.
1740 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1741 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1742
1743 // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1744 // start within the first card.
1745 guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1746 // Also create a G1 rem set.
1747 _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1748 _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
1749
1750 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1751 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1752 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1766 }
1767
1768 // Create the G1ConcurrentMark data structure and thread.
1769 // (Must do this late, so that "max_regions" is defined.)
1770 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1771 if (_cm == NULL || !_cm->completed_initialization()) {
1772 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1773 return JNI_ENOMEM;
1774 }
1775 _cm_thread = _cm->cm_thread();
1776
1777 // Now expand into the initial heap size.
1778 if (!expand(init_byte_size, _workers)) {
1779 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1780 return JNI_ENOMEM;
1781 }
1782
1783 // Perform any initialization actions delegated to the policy.
1784 g1_policy()->init(this, &_collection_set);
1785
1786 // Now we know the target length of young list. So adjust the heap to provision that many regions on dram.
1787 if (is_hetero_heap()) {
1788 static_cast<HeterogeneousHeapRegionManager*>(hrm())->adjust_dram_regions((uint)g1_policy()->young_list_target_length(), workers());
1789 }
1790
1791 jint ecode = initialize_concurrent_refinement();
1792 if (ecode != JNI_OK) {
1793 return ecode;
1794 }
1795
1796 ecode = initialize_young_gen_sampling_thread();
1797 if (ecode != JNI_OK) {
1798 return ecode;
1799 }
1800
1801 {
1802 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1803 dcqs.set_process_completed_threshold((int)concurrent_refine()->yellow_zone());
1804 dcqs.set_max_completed_queue((int)concurrent_refine()->red_zone());
1805 }
1806
1807 // Here we allocate the dummy HeapRegion that is required by the
1808 // G1AllocRegion class.
1809 HeapRegion* dummy_region = _hrm->get_dummy_region();
1810
1909 false, // Reference discovery is not atomic
1910 &_is_alive_closure_cm, // is alive closure
1911 true); // allow changes to number of processing threads
1912
1913 // STW ref processor
1914 _ref_processor_stw =
1915 new ReferenceProcessor(&_is_subject_to_discovery_stw,
1916 mt_processing, // mt processing
1917 ParallelGCThreads, // degree of mt processing
1918 (ParallelGCThreads > 1), // mt discovery
1919 ParallelGCThreads, // degree of mt discovery
1920 true, // Reference discovery is atomic
1921 &_is_alive_closure_stw, // is alive closure
1922 true); // allow changes to number of processing threads
1923 }
1924
1925 CollectorPolicy* G1CollectedHeap::collector_policy() const {
     // Return the collector policy supplied at construction, typed as the
     // generic CollectorPolicy (see g1_collector_policy() for the
     // G1-specific view of the same member).
1926 return _collector_policy;
1927 }
1928
1929 G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
     // Same underlying member as collector_policy(), returned with its
     // G1-specific static type so callers can reach G1-only accessors
     // (e.g. heap_reservation_size_bytes() in initialize()).
1930 return _collector_policy;
1931 }
1932
1933 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
     // Expose the embedded (by-value) soft reference policy member.
1934 return &_soft_ref_policy;
1935 }
1936
1937 size_t G1CollectedHeap::capacity() const {
     // Heap capacity: number of regions tracked by the region manager
     // times the fixed per-region size (GrainBytes).
1938 return _hrm->length() * HeapRegion::GrainBytes;
1939 }
1940
1941 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
     // Committed-but-unused space, per the region manager's free-bytes
     // accounting.
1942 return _hrm->total_free_bytes();
1943 }
1944
1945 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
     // Drain the hot card cache, handing cl to G1HotCardCache::drain on
     // behalf of worker worker_i.
1946 _hot_card_cache->drain(cl, worker_i);
1947 }
1948
1949 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1950 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1951 size_t n_completed_buffers = 0;
1952 while (dcqs.apply_closure_during_gc(cl, worker_i)) {
2512 // This summary needs to be printed before incrementing total collections.
2513 g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2514
2515 // Update common counters.
2516 increment_total_collections(full /* full gc */);
2517 if (full) {
2518 increment_old_marking_cycles_started();
2519 }
2520
2521 // Fill TLAB's and such
2522 double start = os::elapsedTime();
2523 ensure_parsability(true);
2524 g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2525 }
2526
2527 void G1CollectedHeap::gc_epilogue(bool full) {
2528 // Update common counters.
2529 if (full) {
2530 // Update the number of full collections that have been completed.
2531 increment_old_marking_cycles_completed(false /* concurrent */);
2532 // Now we know the target length of young list. So adjust the heap to provision that many regions on dram.
2533 if (is_hetero_heap()) {
2534 static_cast<HeterogeneousHeapRegionManager*>(hrm())->adjust_dram_regions((uint)g1_policy()->young_list_target_length(), workers());
2535 }
2536 }
2537
2538 // We are at the end of the GC. Total collections has already been increased.
2539 g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2540
2541 // FIXME: what is this about?
2542 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2543 // is set.
2544 #if COMPILER2_OR_JVMCI
2545 assert(DerivedPointerTable::is_empty(), "derived pointer present");
2546 #endif
2547 // always_do_update_barrier = true;
2548
2549 double start = os::elapsedTime();
2550 resize_all_tlabs();
2551 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2552
2553 MemoryService::track_memory_usage();
2554 // We have just completed a GC. Update the soft reference
2555 // policy with the new heap occupancy
|