
share/gc/g1/g1CollectedHeap.cpp

rev 1 : G1GC+POGC+NVDIMM patch with the latest comments from all reviewers incorporated.


1299   assert_at_safepoint_on_vm_thread();
1300 
1301   _verifier->verify_region_sets_optional();
1302 
1303   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1304   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1305                             word_size * HeapWordSize);
1306 
1307 
1308   if (expand(expand_bytes, _workers)) {
1309     _hrm.verify_optional();
1310     _verifier->verify_region_sets_optional();
1311     return attempt_allocation_at_safepoint(word_size,
1312                                            false /* expect_null_mutator_alloc_region */);
1313   }
1314   return NULL;
1315 }
1316 
1317 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1318   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1319   aligned_expand_bytes = align_up(aligned_expand_bytes,
1320                                        HeapRegion::GrainBytes);
1321 
1322   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1323                             expand_bytes, aligned_expand_bytes);
1324 
1325   if (is_maximal_no_gc()) {
1326     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1327     return false;
1328   }
1329 
1330   double expand_heap_start_time_sec = os::elapsedTime();
1331   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1332   assert(regions_to_expand > 0, "Must expand by at least one region");
1333 
1334   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1335   if (expand_time_ms != NULL) {
1336     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1337   }
1338 
1339   if (expanded_by > 0) {
1340     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;


1546   // compressed oops mode.
1547 
1548   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1549                                                  heap_alignment);
1550 
1551   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1552 
1553   // Create the barrier set for the entire reserved region.
1554   G1CardTable* ct = new G1CardTable(reserved_region());
1555   ct->initialize();
1556   G1BarrierSet* bs = new G1BarrierSet(ct);
1557   bs->initialize();
1558   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1559   BarrierSet::set_barrier_set(bs);
1560   _card_table = ct;
1561 
1562   // Create the hot card cache.
1563   _hot_card_cache = new G1HotCardCache(this);
1564 
1565   // Carve out the G1 part of the heap.
1566   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1567   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1568   G1RegionToSpaceMapper* heap_storage =
1569     G1RegionToSpaceMapper::create_mapper(g1_rs,
1570                                          g1_rs.size(),
1571                                          page_size,
1572                                          HeapRegion::GrainBytes,
1573                                          1,
1574                                          mtJavaHeap);
1575   os::trace_page_sizes("Heap",
1576                        collector_policy()->min_heap_byte_size(),
1577                        max_byte_size,
1578                        page_size,
1579                        heap_rs.base(),
1580                        heap_rs.size());
1581   heap_storage->set_mapping_changed_listener(&_listener);
1582 
1583   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1584   G1RegionToSpaceMapper* bot_storage =
1585     create_aux_memory_mapper("Block Offset Table",
1586                              G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),


1629   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1630 
1631   {
1632     HeapWord* start = _hrm.reserved().start();
1633     HeapWord* end = _hrm.reserved().end();
1634     size_t granularity = HeapRegion::GrainBytes;
1635 
1636     _in_cset_fast_test.initialize(start, end, granularity);
1637     _humongous_reclaim_candidates.initialize(start, end, granularity);
1638   }
1639 
1640   // Create the G1ConcurrentMark data structure and thread.
1641   // (Must do this late, so that "max_regions" is defined.)
1642   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1643   if (_cm == NULL || !_cm->completed_initialization()) {
1644     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1645     return JNI_ENOMEM;
1646   }
1647   _cm_thread = _cm->cm_thread();
1648 
1649   // Now expand into the initial heap size.
1650   if (!expand(init_byte_size, _workers)) {
1651     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1652     return JNI_ENOMEM;
1653   }
1654 
1655   // Perform any initialization actions delegated to the policy.
1656   g1_policy()->init(this, &_collection_set);
1657 
1658   G1BarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1659                                                  SATB_Q_FL_lock,
1660                                                  G1SATBProcessCompletedThreshold,
1661                                                  Shared_SATB_Q_lock);
1662 
1663   jint ecode = initialize_concurrent_refinement();
1664   if (ecode != JNI_OK) {
1665     return ecode;
1666   }
1667 
1668   ecode = initialize_young_gen_sampling_thread();
1669   if (ecode != JNI_OK) {
1670     return ecode;
1671   }
1672 
1673   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1674                                                   DirtyCardQ_FL_lock,


1694   // region will complain that it cannot support allocations without
1695   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1696   dummy_region->set_eden();
1697   // Make sure it's full.
1698   dummy_region->set_top(dummy_region->end());
1699   G1AllocRegion::setup(this, dummy_region);
1700 
1701   _allocator->init_mutator_alloc_region();
1702 
1703   // Do create of the monitoring and management support so that
1704   // values in the heap have been properly initialized.
1705   _g1mm = new G1MonitoringSupport(this);
1706 
1707   G1StringDedup::initialize();
1708 
1709   _preserved_marks_set.init(ParallelGCThreads);
1710 
1711   _collection_set.initialize(max_regions());
1712 
1713   return JNI_OK;
1714 }
1715 
1716 void G1CollectedHeap::initialize_serviceability() {
1717   _eden_pool = new G1EdenPool(this);
1718   _survivor_pool = new G1SurvivorPool(this);
1719   _old_pool = new G1OldGenPool(this);
1720 
1721   _full_gc_memory_manager.add_pool(_eden_pool);
1722   _full_gc_memory_manager.add_pool(_survivor_pool);
1723   _full_gc_memory_manager.add_pool(_old_pool);
1724 
1725   _memory_manager.add_pool(_eden_pool);
1726   _memory_manager.add_pool(_survivor_pool);
1727 
1728 }
1729 
1730 void G1CollectedHeap::stop() {
1731   // Stop all concurrent threads. We do this to make sure these threads
1732   // do not continue to execute and access resources (e.g. logging)
1733   // that are destroyed during shutdown.




1299   assert_at_safepoint_on_vm_thread();
1300 
1301   _verifier->verify_region_sets_optional();
1302 
1303   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1304   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1305                             word_size * HeapWordSize);
1306 
1307 
1308   if (expand(expand_bytes, _workers)) {
1309     _hrm.verify_optional();
1310     _verifier->verify_region_sets_optional();
1311     return attempt_allocation_at_safepoint(word_size,
1312                                            false /* expect_null_mutator_alloc_region */);
1313   }
1314   return NULL;
1315 }
1316 
1317 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1318   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1319   aligned_expand_bytes = align_up(aligned_expand_bytes, HeapRegion::GrainBytes);

1320 
1321   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1322                             expand_bytes, aligned_expand_bytes);
1323 
1324   if (is_maximal_no_gc()) {
1325     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1326     return false;
1327   }
1328 
1329   double expand_heap_start_time_sec = os::elapsedTime();
1330   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1331   assert(regions_to_expand > 0, "Must expand by at least one region");
1332 
1333   uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1334   if (expand_time_ms != NULL) {
1335     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1336   }
1337 
1338   if (expanded_by > 0) {
1339     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;


1545   // compressed oops mode.
1546 
1547   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1548                                                  heap_alignment);
1549 
1550   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1551 
1552   // Create the barrier set for the entire reserved region.
1553   G1CardTable* ct = new G1CardTable(reserved_region());
1554   ct->initialize();
1555   G1BarrierSet* bs = new G1BarrierSet(ct);
1556   bs->initialize();
1557   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1558   BarrierSet::set_barrier_set(bs);
1559   _card_table = ct;
1560 
1561   // Create the hot card cache.
1562   _hot_card_cache = new G1HotCardCache(this);
1563 
1564   // Carve out the G1 part of the heap.
1565   ReservedSpace g1_rs = heap_rs.first_part(heap_rs.size());
1566   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1567   G1RegionToSpaceMapper* heap_storage =
1568     G1RegionToSpaceMapper::create_mapper(g1_rs,
1569                                          g1_rs.size(),
1570                                          page_size,
1571                                          HeapRegion::GrainBytes,
1572                                          1,
1573                                          mtJavaHeap);
1574   os::trace_page_sizes("Heap",
1575                        collector_policy()->min_heap_byte_size(),
1576                        max_byte_size,
1577                        page_size,
1578                        heap_rs.base(),
1579                        heap_rs.size());
1580   heap_storage->set_mapping_changed_listener(&_listener);
1581 
1582   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1583   G1RegionToSpaceMapper* bot_storage =
1584     create_aux_memory_mapper("Block Offset Table",
1585                              G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),


1628   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1629 
1630   {
1631     HeapWord* start = _hrm.reserved().start();
1632     HeapWord* end = _hrm.reserved().end();
1633     size_t granularity = HeapRegion::GrainBytes;
1634 
1635     _in_cset_fast_test.initialize(start, end, granularity);
1636     _humongous_reclaim_candidates.initialize(start, end, granularity);
1637   }
1638 
1639   // Create the G1ConcurrentMark data structure and thread.
1640   // (Must do this late, so that "max_regions" is defined.)
1641   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1642   if (_cm == NULL || !_cm->completed_initialization()) {
1643     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1644     return JNI_ENOMEM;
1645   }
1646   _cm_thread = _cm->cm_thread();
1647 
1648   // Expand NVDIMM to maximum old gen size.
1649   size_t aligned_expand_bytes = 0;
1650   if (os::has_nvdimm()) {
1651     aligned_expand_bytes = expand_old_gen_on_nvdimm(max_byte_size);
1652   }
1653   // Now expand into the initial heap size.
1654   if (!expand(init_byte_size, _workers)) {
1655     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1656     return JNI_ENOMEM;
1657   }
1658 
1659   if (os::has_nvdimm()) {
1660     // Show how much memory was committed on NVDIMM and DRAM.
1661     log_info(gc, heap)("NVDIMM Reserved Bytes: " SIZE_FORMAT " DRAM Reserved Bytes: " SIZE_FORMAT, aligned_expand_bytes, init_byte_size);
1662     log_info(gc, heap)("When NVDIMM is present, the maximum old gen size is always reserved and committed on NVDIMM.");
1663     log_info(gc, heap)("The JVM may reserve and commit more memory than specified by -Xmn or -Xms (but never more than -Xmx).");
1664   }
1665   // Perform any initialization actions delegated to the policy.
1666   g1_policy()->init(this, &_collection_set);
1667 
1668   G1BarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1669                                                  SATB_Q_FL_lock,
1670                                                  G1SATBProcessCompletedThreshold,
1671                                                  Shared_SATB_Q_lock);
1672 
1673   jint ecode = initialize_concurrent_refinement();
1674   if (ecode != JNI_OK) {
1675     return ecode;
1676   }
1677 
1678   ecode = initialize_young_gen_sampling_thread();
1679   if (ecode != JNI_OK) {
1680     return ecode;
1681   }
1682 
1683   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1684                                                   DirtyCardQ_FL_lock,


1704   // region will complain that it cannot support allocations without
1705   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1706   dummy_region->set_eden();
1707   // Make sure it's full.
1708   dummy_region->set_top(dummy_region->end());
1709   G1AllocRegion::setup(this, dummy_region);
1710 
1711   _allocator->init_mutator_alloc_region();
1712 
1713   // Do create of the monitoring and management support so that
1714   // values in the heap have been properly initialized.
1715   _g1mm = new G1MonitoringSupport(this);
1716 
1717   G1StringDedup::initialize();
1718 
1719   _preserved_marks_set.init(ParallelGCThreads);
1720 
1721   _collection_set.initialize(max_regions());
1722 
1723   return JNI_OK;
1724 }
1725 
1726 size_t G1CollectedHeap::expand_old_gen_on_nvdimm(size_t max_byte_size) {
1727   size_t nvdimm_bytes = max_byte_size - (size_t)(max_byte_size * G1MaxNewSizePercent) / 100;
1728   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(nvdimm_bytes);
1729   aligned_expand_bytes = align_up(aligned_expand_bytes, HeapRegion::GrainBytes);
1730   uint nvdimm_regions = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1731   os::set_nvdimm_regionlength(nvdimm_regions);
1732   expand(aligned_expand_bytes, _workers);
1733   return aligned_expand_bytes;
1734 }
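
A minimal, self-contained sketch (not part of the patch) of the sizing arithmetic that expand_old_gen_on_nvdimm() performs above. The helper align_up_to() and the concrete values (8 GiB max heap, G1MaxNewSizePercent = 60, 4 KiB pages, 4 MiB regions) are illustrative assumptions standing in for the HotSpot utilities and flags:

#include <cstddef>
#include <cstdio>

// Round value up to the nearest multiple of a power-of-two alignment
// (illustrative stand-in for ReservedSpace::page_align_size_up / align_up).
static size_t align_up_to(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t max_byte_size        = 8ULL * 1024 * 1024 * 1024; // -Xmx8g
  const size_t max_new_size_percent = 60;                        // assumed G1MaxNewSizePercent
  const size_t page_size            = 4 * 1024;                  // assumed OS page size
  const size_t grain_bytes          = 4 * 1024 * 1024;           // assumed HeapRegion::GrainBytes

  // Old gen share on NVDIMM = whole heap minus the maximum young gen share.
  size_t nvdimm_bytes = max_byte_size - (max_byte_size * max_new_size_percent) / 100;

  // Round up to page granularity, then to region granularity, mirroring the code above.
  size_t aligned_expand_bytes = align_up_to(align_up_to(nvdimm_bytes, page_size), grain_bytes);
  size_t nvdimm_regions       = aligned_expand_bytes / grain_bytes;

  // With the values above this prints 3435973837 -> 3439329280 bytes, i.e. 820 regions.
  printf("old gen on NVDIMM: %zu bytes, rounded up to %zu bytes (%zu regions)\n",
         nvdimm_bytes, aligned_expand_bytes, nvdimm_regions);
  return 0;
}

With these example values the patch would commit roughly 3.2 GiB on NVDIMM at startup regardless of -Xms, which is what the log messages added in initialize() point out.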
1735 
1736 void G1CollectedHeap::initialize_serviceability() {
1737   _eden_pool = new G1EdenPool(this);
1738   _survivor_pool = new G1SurvivorPool(this);
1739   _old_pool = new G1OldGenPool(this);
1740 
1741   _full_gc_memory_manager.add_pool(_eden_pool);
1742   _full_gc_memory_manager.add_pool(_survivor_pool);
1743   _full_gc_memory_manager.add_pool(_old_pool);
1744 
1745   _memory_manager.add_pool(_eden_pool);
1746   _memory_manager.add_pool(_survivor_pool);
1747 
1748 }
1749 
1750 void G1CollectedHeap::stop() {
1751   // Stop all concurrent threads. We do this to make sure these threads
1752   // do not continue to execute and access resources (e.g. logging)
1753   // that are destroyed during shutdown.

