< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page
rev 13232 : imported patch parallel-fullgc-stefanj
rev 13237 : imported patch 8183226-periodic-rem-set-summary-accesses-uninitialized-stuff


1667   // Necessary to satisfy locking discipline assertions.
1668 
1669   MutexLocker x(Heap_lock);
1670 
1671   // While there are no constraints in the GC code that HeapWordSize
1672   // be any particular value, there are multiple other areas in the
1673   // system which believe this to be true (e.g. oop->object_size in some
1674   // cases incorrectly returns the size in wordSize units rather than
1675   // HeapWordSize).
1676   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1677 
1678   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1679   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1680   size_t heap_alignment = collector_policy()->heap_alignment();
1681 
1682   // Ensure that the sizes are properly aligned.
1683   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1684   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1685   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1686 
1687   _refine_cte_cl = new RefineCardTableEntryClosure();
1688 
1689   jint ecode = JNI_OK;
1690   _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
1691   if (_cg1r == NULL) {
1692     return ecode;
1693   }
1694 
1695   // Reserve the maximum.
1696 
1697   // When compressed oops are enabled, the preferred heap base
1698   // is calculated by subtracting the requested size from the
1699   // 32Gb boundary and using the result as the base address for
1700   // heap reservation. If the requested size is not aligned to
1701   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1702   // into the ReservedHeapSpace constructor) then the actual
1703   // base of the reserved heap may end up differing from the
1704   // address that was requested (i.e. the preferred heap base).
1705   // If this happens then we could end up using a non-optimal
1706   // compressed oops mode.
1707 
1708   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1709                                                  heap_alignment);
1710 
1711   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1712 
1713   // Create the barrier set for the entire reserved region.
1714   G1SATBCardTableLoggingModRefBS* bs
1715     = new G1SATBCardTableLoggingModRefBS(reserved_region());
1716   bs->initialize();
1717   assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
1718   set_barrier_set(bs);
1719 
1720   // Create the hot card cache.
1721   _hot_card_cache = new G1HotCardCache(this);
1722 
1723   // Also create a G1 rem set.
1724   _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
1725 
1726   // Carve out the G1 part of the heap.
1727   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1728   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1729   G1RegionToSpaceMapper* heap_storage =
1730     G1RegionToSpaceMapper::create_mapper(g1_rs,
1731                                          g1_rs.size(),
1732                                          page_size,
1733                                          HeapRegion::GrainBytes,
1734                                          1,
1735                                          mtJavaHeap);
1736   os::trace_page_sizes("Heap",
1737                        collector_policy()->min_heap_byte_size(),
1738                        max_byte_size,
1739                        page_size,
1740                        heap_rs.base(),
1741                        heap_rs.size());
1742   heap_storage->set_mapping_changed_listener(&_listener);
1743 
1744   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1745   G1RegionToSpaceMapper* bot_storage =


1757     create_aux_memory_mapper("Card Counts Table",
1758                              G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1759                              G1CardCounts::heap_map_factor());
1760 
1761   size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1762   G1RegionToSpaceMapper* prev_bitmap_storage =
1763     create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1764   G1RegionToSpaceMapper* next_bitmap_storage =
1765     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1766 
1767   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1768   g1_barrier_set()->initialize(cardtable_storage);
1769   // Do later initialization work for concurrent refinement.
1770   _hot_card_cache->initialize(card_counts_storage);
1771 
1772   // 6843694 - ensure that the maximum region index can fit
1773   // in the remembered set structures.
1774   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1775   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1776 
1777   g1_rem_set()->initialize(max_capacity(), max_regions());


1778 
1779   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1780   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1781   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1782             "too many cards per region");
1783 
1784   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1785 
1786   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1787 
1788   {
1789     HeapWord* start = _hrm.reserved().start();
1790     HeapWord* end = _hrm.reserved().end();
1791     size_t granularity = HeapRegion::GrainBytes;
1792 
1793     _in_cset_fast_test.initialize(start, end, granularity);
1794     _humongous_reclaim_candidates.initialize(start, end, granularity);
1795   }
1796 
1797   // Create the G1ConcurrentMark data structure and thread.


1800   if (_cm == NULL || !_cm->completed_initialization()) {
1801     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1802     return JNI_ENOMEM;
1803   }
1804   _cmThread = _cm->cmThread();
1805 
1806   // Now expand into the initial heap size.
1807   if (!expand(init_byte_size, _workers)) {
1808     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1809     return JNI_ENOMEM;
1810   }
1811 
1812   // Perform any initialization actions delegated to the policy.
1813   g1_policy()->init(this, &_collection_set);
1814 
1815   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1816                                                SATB_Q_FL_lock,
1817                                                G1SATBProcessCompletedThreshold,
1818                                                Shared_SATB_Q_lock);
1819 








1820   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1821                                                 DirtyCardQ_CBL_mon,
1822                                                 DirtyCardQ_FL_lock,
1823                                                 (int)concurrent_g1_refine()->yellow_zone(),
1824                                                 (int)concurrent_g1_refine()->red_zone(),
1825                                                 Shared_DirtyCardQ_lock,
1826                                                 NULL,  // fl_owner
1827                                                 true); // init_free_ids
1828 
1829   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1830                                     DirtyCardQ_CBL_mon,
1831                                     DirtyCardQ_FL_lock,
1832                                     -1, // never trigger processing
1833                                     -1, // no limit on length
1834                                     Shared_DirtyCardQ_lock,
1835                                     &JavaThread::dirty_card_queue_set());
1836 
1837   // Here we allocate the dummy HeapRegion that is required by the
1838   // G1AllocRegion class.
1839   HeapRegion* dummy_region = _hrm.get_dummy_region();


1841   // We'll re-use the same region whether the alloc region will
1842   // require BOT updates or not and, if it doesn't, then a non-young
1843   // region will complain that it cannot support allocations without
1844   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1845   dummy_region->set_eden();
1846   // Make sure it's full.
1847   dummy_region->set_top(dummy_region->end());
1848   G1AllocRegion::setup(this, dummy_region);
1849 
1850   _allocator->init_mutator_alloc_region();
1851 
1852   // Do create of the monitoring and management support so that
1853   // values in the heap have been properly initialized.
1854   _g1mm = new G1MonitoringSupport(this);
1855 
1856   G1StringDedup::initialize();
1857 
1858   _preserved_marks_set.init(ParallelGCThreads);
1859 
1860   _collection_set.initialize(max_regions());


1861 
1862   return JNI_OK;
1863 }
1864 
1865 void G1CollectedHeap::stop() {
1866   // Stop all concurrent threads. We do this to make sure these threads
1867   // do not continue to execute and access resources (e.g. logging)
1868   // that are destroyed during shutdown.
1869   _cg1r->stop();      // concurrent refinement threads (set up in initialize())
1870   _cmThread->stop();  // concurrent marking thread
1871   if (G1StringDedup::is_enabled()) {
1872     G1StringDedup::stop();  // dedup threads exist only when string dedup is on
1873   }
1874 }
1875 
// A G1 heap is aligned to region-size granularity, so the largest
// alignment the heap can ever require is the maximum region size.
1876 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1877   return HeapRegion::max_region_size();
1878 }
1879 
1880 void G1CollectedHeap::post_initialize() {




1667   // Necessary to satisfy locking discipline assertions.
1668 
1669   MutexLocker x(Heap_lock);
1670 
1671   // While there are no constraints in the GC code that HeapWordSize
1672   // be any particular value, there are multiple other areas in the
1673   // system which believe this to be true (e.g. oop->object_size in some
1674   // cases incorrectly returns the size in wordSize units rather than
1675   // HeapWordSize).
1676   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1677 
1678   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1679   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1680   size_t heap_alignment = collector_policy()->heap_alignment();
1681 
1682   // Ensure that the sizes are properly aligned.
1683   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1684   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1685   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1686 








1687   // Reserve the maximum.
1688 
1689   // When compressed oops are enabled, the preferred heap base
1690   // is calculated by subtracting the requested size from the
1691   // 32Gb boundary and using the result as the base address for
1692   // heap reservation. If the requested size is not aligned to
1693   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1694   // into the ReservedHeapSpace constructor) then the actual
1695   // base of the reserved heap may end up differing from the
1696   // address that was requested (i.e. the preferred heap base).
1697   // If this happens then we could end up using a non-optimal
1698   // compressed oops mode.
1699 
1700   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1701                                                  heap_alignment);
1702 
1703   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1704 
1705   // Create the barrier set for the entire reserved region.
1706   G1SATBCardTableLoggingModRefBS* bs
1707     = new G1SATBCardTableLoggingModRefBS(reserved_region());
1708   bs->initialize();
1709   assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
1710   set_barrier_set(bs);
1711 
1712   // Create the hot card cache.
1713   _hot_card_cache = new G1HotCardCache(this);
1714 



1715   // Carve out the G1 part of the heap.
1716   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1717   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1718   G1RegionToSpaceMapper* heap_storage =
1719     G1RegionToSpaceMapper::create_mapper(g1_rs,
1720                                          g1_rs.size(),
1721                                          page_size,
1722                                          HeapRegion::GrainBytes,
1723                                          1,
1724                                          mtJavaHeap);
1725   os::trace_page_sizes("Heap",
1726                        collector_policy()->min_heap_byte_size(),
1727                        max_byte_size,
1728                        page_size,
1729                        heap_rs.base(),
1730                        heap_rs.size());
1731   heap_storage->set_mapping_changed_listener(&_listener);
1732 
1733   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1734   G1RegionToSpaceMapper* bot_storage =


1746     create_aux_memory_mapper("Card Counts Table",
1747                              G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1748                              G1CardCounts::heap_map_factor());
1749 
1750   size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1751   G1RegionToSpaceMapper* prev_bitmap_storage =
1752     create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1753   G1RegionToSpaceMapper* next_bitmap_storage =
1754     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1755 
1756   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1757   g1_barrier_set()->initialize(cardtable_storage);
1758   // Do later initialization work for concurrent refinement.
1759   _hot_card_cache->initialize(card_counts_storage);
1760 
1761   // 6843694 - ensure that the maximum region index can fit
1762   // in the remembered set structures.
1763   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1764   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1765 
1766   // Also create a G1 rem set.
1767   _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
1768   _g1_rem_set->initialize(max_capacity(), max_regions());
1769 
1770   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1771   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1772   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1773             "too many cards per region");
1774 
1775   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1776 
1777   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1778 
1779   {
1780     HeapWord* start = _hrm.reserved().start();
1781     HeapWord* end = _hrm.reserved().end();
1782     size_t granularity = HeapRegion::GrainBytes;
1783 
1784     _in_cset_fast_test.initialize(start, end, granularity);
1785     _humongous_reclaim_candidates.initialize(start, end, granularity);
1786   }
1787 
1788   // Create the G1ConcurrentMark data structure and thread.


1791   if (_cm == NULL || !_cm->completed_initialization()) {
1792     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1793     return JNI_ENOMEM;
1794   }
1795   _cmThread = _cm->cmThread();
1796 
1797   // Now expand into the initial heap size.
1798   if (!expand(init_byte_size, _workers)) {
1799     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1800     return JNI_ENOMEM;
1801   }
1802 
1803   // Perform any initialization actions delegated to the policy.
1804   g1_policy()->init(this, &_collection_set);
1805 
1806   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1807                                                SATB_Q_FL_lock,
1808                                                G1SATBProcessCompletedThreshold,
1809                                                Shared_SATB_Q_lock);
1810 
1811   _refine_cte_cl = new RefineCardTableEntryClosure();
1812 
1813   jint ecode = JNI_OK;
1814   _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
1815   if (_cg1r == NULL) {
1816     return ecode;
1817   }
1818   
1819   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1820                                                 DirtyCardQ_CBL_mon,
1821                                                 DirtyCardQ_FL_lock,
1822                                                 (int)concurrent_g1_refine()->yellow_zone(),
1823                                                 (int)concurrent_g1_refine()->red_zone(),
1824                                                 Shared_DirtyCardQ_lock,
1825                                                 NULL,  // fl_owner
1826                                                 true); // init_free_ids
1827 
1828   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1829                                     DirtyCardQ_CBL_mon,
1830                                     DirtyCardQ_FL_lock,
1831                                     -1, // never trigger processing
1832                                     -1, // no limit on length
1833                                     Shared_DirtyCardQ_lock,
1834                                     &JavaThread::dirty_card_queue_set());
1835 
1836   // Here we allocate the dummy HeapRegion that is required by the
1837   // G1AllocRegion class.
1838   HeapRegion* dummy_region = _hrm.get_dummy_region();


1840   // We'll re-use the same region whether the alloc region will
1841   // require BOT updates or not and, if it doesn't, then a non-young
1842   // region will complain that it cannot support allocations without
1843   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1844   dummy_region->set_eden();
1845   // Make sure it's full.
1846   dummy_region->set_top(dummy_region->end());
1847   G1AllocRegion::setup(this, dummy_region);
1848 
1849   _allocator->init_mutator_alloc_region();
1850 
1851   // Do create of the monitoring and management support so that
1852   // values in the heap have been properly initialized.
1853   _g1mm = new G1MonitoringSupport(this);
1854 
1855   G1StringDedup::initialize();
1856 
1857   _preserved_marks_set.init(ParallelGCThreads);
1858 
1859   _collection_set.initialize(max_regions());
1860 
1861   _g1_rem_set->initialize_periodic_summary_info();
1862 
1863   return JNI_OK;
1864 }
1865 
1866 void G1CollectedHeap::stop() {
1867   // Stop all concurrent threads. We do this to make sure these threads
1868   // do not continue to execute and access resources (e.g. logging)
1869   // that are destroyed during shutdown.
1870   _cg1r->stop();      // concurrent refinement threads (set up in initialize())
1871   _cmThread->stop();  // concurrent marking thread
1872   if (G1StringDedup::is_enabled()) {
1873     G1StringDedup::stop();  // dedup threads exist only when string dedup is on
1874   }
1875 }
1876 
// A G1 heap is aligned to region-size granularity, so the largest
// alignment the heap can ever require is the maximum region size.
1877 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1878   return HeapRegion::max_region_size();
1879 }
1880 
1881 void G1CollectedHeap::post_initialize() {


< prev index next >