1312 assert_at_safepoint_on_vm_thread();
1313
1314 _verifier->verify_region_sets_optional();
1315
1316 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1317 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1318 word_size * HeapWordSize);
1319
1320
1321 if (expand(expand_bytes, _workers)) {
1322 _hrm.verify_optional();
1323 _verifier->verify_region_sets_optional();
1324 return attempt_allocation_at_safepoint(word_size,
1325 false /* expect_null_mutator_alloc_region */);
1326 }
1327 return NULL;
1328 }
1329
// Attempt to grow the committed heap by at least expand_bytes.
// pretouch_workers may be used to pretouch the newly committed pages in
// parallel; expand_time_ms, when non-NULL, receives the elapsed commit
// time in milliseconds.  Returns false when no expansion was possible;
// the success path continues beyond the lines visible in this chunk.
bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
  // Round the request up to whole OS pages first, then to whole G1
  // regions — the heap can only be committed in units of GrainBytes.
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_up(aligned_expand_bytes,
                                  HeapRegion::GrainBytes);

  log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
                            expand_bytes, aligned_expand_bytes);

  // Nothing to do if the heap is already at its maximum capacity.
  if (is_maximal_no_gc()) {
    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
    return false;
  }

  // Time the actual region commit (including any pretouching).
  double expand_heap_start_time_sec = os::elapsedTime();
  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
  assert(regions_to_expand > 0, "Must expand by at least one region");

  // The region manager may commit fewer regions than requested.
  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
  // Report elapsed time to the caller only if requested (NULL allowed).
  if (expand_time_ms != NULL) {
    *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
  }

  if (expanded_by > 0) {
    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1559 // compressed oops mode.
1560
1561 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1562 heap_alignment);
1563
1564 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1565
1566 // Create the barrier set for the entire reserved region.
1567 G1CardTable* ct = new G1CardTable(reserved_region());
1568 ct->initialize();
1569 G1BarrierSet* bs = new G1BarrierSet(ct);
1570 bs->initialize();
1571 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1572 BarrierSet::set_barrier_set(bs);
1573 _card_table = ct;
1574
1575 // Create the hot card cache.
1576 _hot_card_cache = new G1HotCardCache(this);
1577
1578 // Carve out the G1 part of the heap.
1579 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1580 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1581 G1RegionToSpaceMapper* heap_storage =
1582 G1RegionToSpaceMapper::create_mapper(g1_rs,
1583 g1_rs.size(),
1584 page_size,
1585 HeapRegion::GrainBytes,
1586 1,
1587 mtJavaHeap);
1588 os::trace_page_sizes("Heap",
1589 collector_policy()->min_heap_byte_size(),
1590 max_byte_size,
1591 page_size,
1592 heap_rs.base(),
1593 heap_rs.size());
1594 heap_storage->set_mapping_changed_listener(&_listener);
1595
1596 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1597 G1RegionToSpaceMapper* bot_storage =
1598 create_aux_memory_mapper("Block Offset Table",
1599 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1642 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1643
1644 {
1645 HeapWord* start = _hrm.reserved().start();
1646 HeapWord* end = _hrm.reserved().end();
1647 size_t granularity = HeapRegion::GrainBytes;
1648
1649 _in_cset_fast_test.initialize(start, end, granularity);
1650 _humongous_reclaim_candidates.initialize(start, end, granularity);
1651 }
1652
1653 // Create the G1ConcurrentMark data structure and thread.
1654 // (Must do this late, so that "max_regions" is defined.)
1655 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1656 if (_cm == NULL || !_cm->completed_initialization()) {
1657 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1658 return JNI_ENOMEM;
1659 }
1660 _cm_thread = _cm->cm_thread();
1661
1662 // Now expand into the initial heap size.
1663 if (!expand(init_byte_size, _workers)) {
1664 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1665 return JNI_ENOMEM;
1666 }
1667
1668 // Perform any initialization actions delegated to the policy.
1669 g1_policy()->init(this, &_collection_set);
1670
1671 G1BarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1672 SATB_Q_FL_lock,
1673 G1SATBProcessCompletedThreshold,
1674 Shared_SATB_Q_lock);
1675
1676 jint ecode = initialize_concurrent_refinement();
1677 if (ecode != JNI_OK) {
1678 return ecode;
1679 }
1680
1681 ecode = initialize_young_gen_sampling_thread();
1682 if (ecode != JNI_OK) {
1683 return ecode;
1684 }
1685
1686 G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1687 DirtyCardQ_FL_lock,
1707 // region will complain that it cannot support allocations without
1708 // BOT updates. So we'll tag the dummy region as eden to avoid that.
1709 dummy_region->set_eden();
1710 // Make sure it's full.
1711 dummy_region->set_top(dummy_region->end());
1712 G1AllocRegion::setup(this, dummy_region);
1713
1714 _allocator->init_mutator_alloc_region();
1715
1716 // Do create of the monitoring and management support so that
1717 // values in the heap have been properly initialized.
1718 _g1mm = new G1MonitoringSupport(this);
1719
1720 G1StringDedup::initialize();
1721
1722 _preserved_marks_set.init(ParallelGCThreads);
1723
1724 _collection_set.initialize(max_regions());
1725
1726 return JNI_OK;
1727 }
1728
1729 void G1CollectedHeap::initialize_serviceability() {
1730 _eden_pool = new G1EdenPool(this);
1731 _survivor_pool = new G1SurvivorPool(this);
1732 _old_pool = new G1OldGenPool(this);
1733
1734 _full_gc_memory_manager.add_pool(_eden_pool);
1735 _full_gc_memory_manager.add_pool(_survivor_pool);
1736 _full_gc_memory_manager.add_pool(_old_pool);
1737
1738 _memory_manager.add_pool(_eden_pool);
1739 _memory_manager.add_pool(_survivor_pool);
1740
1741 }
1742
1743 void G1CollectedHeap::stop() {
1744 // Stop all concurrent threads. We do this to make sure these threads
1745 // do not continue to execute and access resources (e.g. logging)
1746 // that are destroyed during shutdown.
|
1312 assert_at_safepoint_on_vm_thread();
1313
1314 _verifier->verify_region_sets_optional();
1315
1316 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1317 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1318 word_size * HeapWordSize);
1319
1320
1321 if (expand(expand_bytes, _workers)) {
1322 _hrm.verify_optional();
1323 _verifier->verify_region_sets_optional();
1324 return attempt_allocation_at_safepoint(word_size,
1325 false /* expect_null_mutator_alloc_region */);
1326 }
1327 return NULL;
1328 }
1329
// Attempt to grow the committed heap by at least expand_bytes.
// pretouch_workers may be used to pretouch the newly committed pages in
// parallel; expand_time_ms, when non-NULL, receives the elapsed commit
// time in milliseconds.  Returns false when no expansion was possible;
// the success path continues beyond the lines visible in this chunk.
bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
  // Round the request up to whole OS pages first, then to whole G1
  // regions — the heap can only be committed in units of GrainBytes.
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  aligned_expand_bytes = align_up(aligned_expand_bytes, HeapRegion::GrainBytes);

  log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
                            expand_bytes, aligned_expand_bytes);

  // Nothing to do if the heap is already at its maximum capacity.
  if (is_maximal_no_gc()) {
    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
    return false;
  }

  // Time the actual region commit (including any pretouching).
  double expand_heap_start_time_sec = os::elapsedTime();
  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
  assert(regions_to_expand > 0, "Must expand by at least one region");

  // The region manager may commit fewer regions than requested.
  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
  // Report elapsed time to the caller only if requested (NULL allowed).
  if (expand_time_ms != NULL) {
    *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
  }

  if (expanded_by > 0) {
    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1558 // compressed oops mode.
1559
1560 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1561 heap_alignment);
1562
1563 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1564
1565 // Create the barrier set for the entire reserved region.
1566 G1CardTable* ct = new G1CardTable(reserved_region());
1567 ct->initialize();
1568 G1BarrierSet* bs = new G1BarrierSet(ct);
1569 bs->initialize();
1570 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1571 BarrierSet::set_barrier_set(bs);
1572 _card_table = ct;
1573
1574 // Create the hot card cache.
1575 _hot_card_cache = new G1HotCardCache(this);
1576
1577 // Carve out the G1 part of the heap.
1578 ReservedSpace g1_rs = heap_rs.first_part(heap_rs.size());
1579 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1580 G1RegionToSpaceMapper* heap_storage =
1581 G1RegionToSpaceMapper::create_mapper(g1_rs,
1582 g1_rs.size(),
1583 page_size,
1584 HeapRegion::GrainBytes,
1585 1,
1586 mtJavaHeap);
1587 os::trace_page_sizes("Heap",
1588 collector_policy()->min_heap_byte_size(),
1589 max_byte_size,
1590 page_size,
1591 heap_rs.base(),
1592 heap_rs.size());
1593 heap_storage->set_mapping_changed_listener(&_listener);
1594
1595 // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1596 G1RegionToSpaceMapper* bot_storage =
1597 create_aux_memory_mapper("Block Offset Table",
1598 G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1641 _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1642
1643 {
1644 HeapWord* start = _hrm.reserved().start();
1645 HeapWord* end = _hrm.reserved().end();
1646 size_t granularity = HeapRegion::GrainBytes;
1647
1648 _in_cset_fast_test.initialize(start, end, granularity);
1649 _humongous_reclaim_candidates.initialize(start, end, granularity);
1650 }
1651
1652 // Create the G1ConcurrentMark data structure and thread.
1653 // (Must do this late, so that "max_regions" is defined.)
1654 _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1655 if (_cm == NULL || !_cm->completed_initialization()) {
1656 vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1657 return JNI_ENOMEM;
1658 }
1659 _cm_thread = _cm->cm_thread();
1660
1661 // Expand NVDIMM to maximum old gen size.
1662 size_t aligned_expand_bytes = 0;
1663 if (os::has_nvdimm()) {
1664 aligned_expand_bytes = expand_old_gen_on_nvdimm(max_byte_size);
1665 }
1666 // Now expand into the initial heap size.
1667 if (!expand(init_byte_size, _workers)) {
1668 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1669 return JNI_ENOMEM;
1670 }
1671
1672 if (os::has_nvdimm()) {
1673 // Show how much memory was committed on NVDIMM and DRAM.
1674 log_info(gc, heap)("NVDIMM Reserved Bytes : %ld DRAM Reserved Bytes : %ld \n", aligned_expand_bytes, init_byte_size);
1675 log_info(gc, heap)("When NVDIMM is present, we always reserve and commit Maximum OldGen Size on NVDIMM");
1676 log_info(gc, heap)("JVM will have more size reserved and committed than specified by Xmn or Xms options (but never more than Xmx).");
1677 }
1678 // Perform any initialization actions delegated to the policy.
1679 g1_policy()->init(this, &_collection_set);
1680
1681 G1BarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1682 SATB_Q_FL_lock,
1683 G1SATBProcessCompletedThreshold,
1684 Shared_SATB_Q_lock);
1685
1686 jint ecode = initialize_concurrent_refinement();
1687 if (ecode != JNI_OK) {
1688 return ecode;
1689 }
1690
1691 ecode = initialize_young_gen_sampling_thread();
1692 if (ecode != JNI_OK) {
1693 return ecode;
1694 }
1695
1696 G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1697 DirtyCardQ_FL_lock,
1717 // region will complain that it cannot support allocations without
1718 // BOT updates. So we'll tag the dummy region as eden to avoid that.
1719 dummy_region->set_eden();
1720 // Make sure it's full.
1721 dummy_region->set_top(dummy_region->end());
1722 G1AllocRegion::setup(this, dummy_region);
1723
1724 _allocator->init_mutator_alloc_region();
1725
1726 // Do create of the monitoring and management support so that
1727 // values in the heap have been properly initialized.
1728 _g1mm = new G1MonitoringSupport(this);
1729
1730 G1StringDedup::initialize();
1731
1732 _preserved_marks_set.init(ParallelGCThreads);
1733
1734 _collection_set.initialize(max_regions());
1735
1736 return JNI_OK;
1737 }
1738
1739 size_t G1CollectedHeap::expand_old_gen_on_nvdimm(size_t max_byte_size) {
1740 size_t nvdimm_bytes = max_byte_size - (size_t)(max_byte_size * G1MaxNewSizePercent)/100;
1741 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(nvdimm_bytes);
1742 aligned_expand_bytes = align_up(aligned_expand_bytes, HeapRegion::GrainBytes);
1743 uint nvdimm_regions = (uint)(aligned_expand_bytes/HeapRegion::GrainBytes);
1744 os::set_nvdimm_regionlength(nvdimm_regions);
1745 expand(aligned_expand_bytes, _workers);
1746 return aligned_expand_bytes;
1747 }
1748
1749 void G1CollectedHeap::initialize_serviceability() {
1750 _eden_pool = new G1EdenPool(this);
1751 _survivor_pool = new G1SurvivorPool(this);
1752 _old_pool = new G1OldGenPool(this);
1753
1754 _full_gc_memory_manager.add_pool(_eden_pool);
1755 _full_gc_memory_manager.add_pool(_survivor_pool);
1756 _full_gc_memory_manager.add_pool(_old_pool);
1757
1758 _memory_manager.add_pool(_eden_pool);
1759 _memory_manager.add_pool(_survivor_pool);
1760
1761 }
1762
1763 void G1CollectedHeap::stop() {
1764 // Stop all concurrent threads. We do this to make sure these threads
1765 // do not continue to execute and access resources (e.g. logging)
1766 // that are destroyed during shutdown.
|