
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 13232 : imported patch parallel-fullgc-stefanj
rev 13237 : imported patch 8183226-periodic-rem-set-summary-accesses-uninitialized-stuff
rev 13238 : imported patch 8183226-eridk-sjohanss-review
rev 13240 : imported patch 8183128-cleanup-refinecardtableentryclosure
rev 13241 : imported patch 8183128-erikd-review

--- Old version ---

  78 #include "prims/resolvedMethodTable.hpp"
  79 #include "runtime/atomic.hpp"
  80 #include "runtime/init.hpp"
  81 #include "runtime/orderAccess.inline.hpp"
  82 #include "runtime/vmThread.hpp"
  83 #include "utilities/align.hpp"
  84 #include "utilities/globalDefinitions.hpp"
  85 #include "utilities/stack.inline.hpp"
  86 
  87 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  88 
  89 // INVARIANTS/NOTES
  90 //
  91 // All allocation activity covered by the G1CollectedHeap interface is
  92 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  93 // and allocate_new_tlab, which are the "entry" points to the
  94 // allocation code from the rest of the JVM.  (Note that this does not
  95 // apply to TLAB allocation, which is not part of this interface: it
  96 // is done by clients of this interface.)
  97 
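The discipline described above can be pictured with a small standalone
sketch: the shared entry point serializes on a lock, while TLAB
allocation is a per-thread bump pointer that takes no lock at all.
Everything below (the lock, the toy heap, the Tlab type) is an
illustrative stand-in, not HotSpot code.

  #include <cstddef>
  #include <mutex>

  static std::mutex heap_lock;        // stand-in for Heap_lock
  static char       heap[1 << 20];
  static size_t     heap_top = 0;

  // "mem_allocate"-style entry point: every caller serializes here.
  void* mem_allocate(size_t bytes) {
    std::lock_guard<std::mutex> guard(heap_lock);
    if (heap_top + bytes > sizeof(heap)) {
      return NULL;
    }
    void* p = heap + heap_top;
    heap_top += bytes;
    return p;
  }

  // TLAB-style allocation: thread-local, lock-free; a refill would go
  // back through the locked path (mem_allocate/allocate_new_tlab).
  struct Tlab {
    char* top;
    char* end;
    void* allocate(size_t bytes) {
      if (top + bytes > end) {
        return NULL;  // exhausted: caller refills via the locked path
      }
      void* p = top;
      top += bytes;
      return p;
    }
  };
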
  98 // Local to this file.
  99 
 100 class RefineCardTableEntryClosure: public CardTableEntryClosure {
 101   bool _concurrent;
 102 public:
 103   RefineCardTableEntryClosure() : _concurrent(true) { }
 104 
 105   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 106     G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
 107 
 108     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 109       // Caller will actually yield.
 110       return false;
 111     }
 112     // Otherwise, we finished successfully; return true.
 113     return true;
 114   }
 115 
 116   void set_concurrent(bool b) { _concurrent = b; }
 117 };
 118 
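The boolean result of do_card_ptr above is a cooperative-yield
protocol: returning false tells the buffer-processing loop to stop so
the refinement thread can yield to the suspendible thread set. A
minimal standalone sketch of that protocol follows; all names are
hypothetical, not the HotSpot API.

  #include <cstddef>

  struct CardClosure {
    // Return false to ask the processing loop to stop so the caller
    // can yield; return true to continue.
    virtual bool do_card(unsigned char* card) = 0;
    virtual ~CardClosure() {}
  };

  // Applies the closure to each card in a buffer. Returns the index
  // of the first unprocessed card so processing can resume there
  // after the yield; a result equal to len means the buffer is done.
  size_t process_buffer(unsigned char** cards, size_t len,
                        CardClosure* cl) {
    for (size_t i = 0; i < len; i++) {
      if (!cl->do_card(cards[i])) {
        return i;
      }
    }
    return len;
  }
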
 119 
 120 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 121  private:
 122   size_t _num_dirtied;
 123   G1CollectedHeap* _g1h;
 124   G1SATBCardTableLoggingModRefBS* _g1_bs;
 125 
 126   HeapRegion* region_for_card(jbyte* card_ptr) const {
 127     return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
 128   }
 129 
 130   bool will_become_free(HeapRegion* hr) const {
 131     // A region will be freed by free_collection_set if the region is in the
 132     // collection set and has not had an evacuation failure.
 133     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 134   }
 135 
 136  public:
 137   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
 138     _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 139 


1565   _verifier->verify_region_sets_optional();
1566 }
1567 
1568 // Public methods.
1569 
1570 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1571   CollectedHeap(),
1572   _collector_policy(collector_policy),
1573   _g1_policy(create_g1_policy()),
1574   _collection_set(this, _g1_policy),
1575   _dirty_card_queue_set(false),
1576   _is_alive_closure_cm(this),
1577   _is_alive_closure_stw(this),
1578   _ref_processor_cm(NULL),
1579   _ref_processor_stw(NULL),
1580   _bot(NULL),
1581   _hot_card_cache(NULL),
1582   _g1_rem_set(NULL),
1583   _cg1r(NULL),
1584   _g1mm(NULL),
1585   _refine_cte_cl(NULL),
1586   _preserved_marks_set(true /* in_c_heap */),
1587   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1588   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1589   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1590   _humongous_reclaim_candidates(),
1591   _has_humongous_reclaim_candidates(false),
1592   _archive_allocator(NULL),
1593   _free_regions_coming(false),
1594   _gc_time_stamp(0),
1595   _summary_bytes_used(0),
1596   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1597   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1598   _expand_heap_after_alloc_failure(true),
1599   _old_marking_cycles_started(0),
1600   _old_marking_cycles_completed(0),
1601   _in_cset_fast_test(),
1602   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1603   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1604 
1605   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1644   ReservedSpace rs(size, preferred_page_size);
1645   G1RegionToSpaceMapper* result  =
1646     G1RegionToSpaceMapper::create_mapper(rs,
1647                                          size,
1648                                          rs.alignment(),
1649                                          HeapRegion::GrainBytes,
1650                                          translation_factor,
1651                                          mtGC);
1652 
1653   os::trace_page_sizes_for_requested_size(description,
1654                                           size,
1655                                           preferred_page_size,
1656                                           rs.alignment(),
1657                                           rs.base(),
1658                                           rs.size());
1659 
1660   return result;
1661 }
1662 
1663 jint G1CollectedHeap::initialize_concurrent_refinement() {
1664   _refine_cte_cl = new RefineCardTableEntryClosure();
1665 
1666   jint ecode = JNI_OK;
1667   _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
1668   return ecode;
1669 }
1670 
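Note the error-reporting convention: create() hands back a jint
through an out-parameter instead of throwing, since this code runs
before the VM is fully initialized, and
initialize_concurrent_refinement() simply forwards that code so
initialize() can abort. A sketch of the pattern; code_t, Refiner, and
the constants are all illustrative stand-ins:

  #include <cstddef>
  #include <new>

  typedef int code_t;                 // stand-in for jint
  static const code_t CODE_OK  = 0;   // stand-in for JNI_OK
  static const code_t CODE_OOM = -4;  // stand-in for JNI_ENOMEM

  struct Refiner {
    static Refiner* create(code_t* ecode) {
      Refiner* r = new (std::nothrow) Refiner();
      *ecode = (r == NULL) ? CODE_OOM : CODE_OK;
      return r;
    }
  };

  static Refiner* g_refiner = NULL;

  code_t initialize_refinement() {
    code_t ecode = CODE_OK;
    g_refiner = Refiner::create(&ecode);
    return ecode;  // any non-OK value aborts initialization
  }
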
1671 jint G1CollectedHeap::initialize() {
1672   CollectedHeap::pre_initialize();
1673   os::enable_vtime();
1674 
1675   // Necessary to satisfy locking discipline assertions.
1676 
1677   MutexLocker x(Heap_lock);
1678 
1679   // While there are no constraints in the GC code that HeapWordSize
1680   // be any particular value, there are multiple other areas in the
1681   // system which believe this to be true (e.g. oop->object_size in some
1682   // cases incorrectly returns the size in wordSize units rather than
1683   // HeapWordSize).
1684   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1685 
1686   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1687   size_t max_byte_size = collector_policy()->max_heap_byte_size();


1804 
1805   // Now expand into the initial heap size.
1806   if (!expand(init_byte_size, _workers)) {
1807     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1808     return JNI_ENOMEM;
1809   }
1810 
1811   // Perform any initialization actions delegated to the policy.
1812   g1_policy()->init(this, &_collection_set);
1813 
1814   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1815                                                SATB_Q_FL_lock,
1816                                                G1SATBProcessCompletedThreshold,
1817                                                Shared_SATB_Q_lock);
1818 
1819   jint ecode = initialize_concurrent_refinement();
1820   if (ecode != JNI_OK) {
1821     return ecode;
1822   }
1823   
1824   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1825                                                 DirtyCardQ_CBL_mon,
1826                                                 DirtyCardQ_FL_lock,
1827                                                 (int)concurrent_g1_refine()->yellow_zone(),
1828                                                 (int)concurrent_g1_refine()->red_zone(),
1829                                                 Shared_DirtyCardQ_lock,
1830                                                 NULL,  // fl_owner
1831                                                 true); // init_free_ids
1832 
1833   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1834                                     DirtyCardQ_CBL_mon,
1835                                     DirtyCardQ_FL_lock,
1836                                     -1, // never trigger processing
1837                                     -1, // no limit on length
1838                                     Shared_DirtyCardQ_lock,
1839                                     &JavaThread::dirty_card_queue_set());
1840 
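Two differently tuned instances of the same queue-set type are
configured here: the JavaThread set starts handing completed buffers
to the refinement threads once the yellow zone is exceeded, while the
heap's own set is created with -1 thresholds so it never triggers
processing by itself and is only drained explicitly during a GC. A
simplified sketch of that threshold behavior, with illustrative names:

  #include <cstddef>
  #include <vector>

  struct QueueSet {
    std::vector<void*> completed;  // completed buffers
    long process_threshold;        // -1: never trigger processing

    explicit QueueSet(long threshold) : process_threshold(threshold) {}

    void enqueue_completed(void* buf) {
      completed.push_back(buf);
      if (process_threshold >= 0 &&
          completed.size() > (size_t)process_threshold) {
        notify_processors();  // e.g. wake refinement threads
      }
    }
    void notify_processors() { /* wake consumers; elided */ }
  };

  // QueueSet java_threads_set(yellow_zone); // processed concurrently
  // QueueSet gc_only_set(-1);               // drained only on request
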
1841   // Here we allocate the dummy HeapRegion that is required by the
1842   // G1AllocRegion class.
1843   HeapRegion* dummy_region = _hrm.get_dummy_region();
1844 
1845   // We'll re-use the same region whether the alloc region will
1846   // require BOT updates or not and, if it doesn't, then a non-young
1847   // region will complain that it cannot support allocations without
1848   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1849   dummy_region->set_eden();
1850   // Make sure it's full.
1851   dummy_region->set_top(dummy_region->end());
1852   G1AllocRegion::setup(this, dummy_region);
1853 
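The trick works because set_top(end()) leaves the dummy region
permanently "full": every lock-free allocation attempt fails at once,
so the shared alloc-region code always falls into the slow path that
installs a real region. A stand-in sketch (Region here is
illustrative, not HeapRegion):

  #include <cstddef>

  struct Region {
    char* _top;
    char* _end;

    // Bump-pointer allocation; returns NULL when the region is full.
    void* allocate(size_t bytes) {
      if (_top + bytes > _end) {
        return NULL;             // always taken once _top == _end
      }
      void* p = _top;
      _top += bytes;
      return p;
    }

    // What set_top(end()) achieves above: no allocation can ever
    // succeed, so the region is a safe sentinel until a real one
    // is installed.
    void make_full() { _top = _end; }
  };
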
1854   _allocator->init_mutator_alloc_region();


1991     return false;
1992   }
1993 
1994   bool failures() { return _failures; }
1995 };
1996 
1997 void G1CollectedHeap::check_gc_time_stamps() {
1998   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
1999   heap_region_iterate(&cl);
2000   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2001 }
2002 #endif // PRODUCT
2003 
2004 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2005   _hot_card_cache->drain(cl, worker_i);
2006 }
2007 
2008 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
2009   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2010   size_t n_completed_buffers = 0;
2011   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2012     n_completed_buffers++;
2013   }
2014   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2015   dcqs.clear_n_completed_buffers();
2016   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2017 }
2018 
2019 // Computes the sum of the storage used by the various regions.
2020 size_t G1CollectedHeap::used() const {
2021   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2022   if (_archive_allocator != NULL) {
2023     result += _archive_allocator->used();
2024   }
2025   return result;
2026 }
2027 
2028 size_t G1CollectedHeap::used_unlocked() const {
2029   return _summary_bytes_used;
2030 }
2031 


5148   assert_at_safepoint(true /* should_be_vm_thread */);
5149 
5150   if (!free_list_only) {
5151     _eden.clear();
5152     _survivor.clear();
5153   }
5154 
5155   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
5156   heap_region_iterate(&cl);
5157 
5158   if (!free_list_only) {
5159     set_used(cl.total_used());
5160     if (_archive_allocator != NULL) {
5161       _archive_allocator->clear_used();
5162     }
5163   }
5164   assert(used_unlocked() == recalculate_used(),
5165          "inconsistent used_unlocked(), "
5166          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
5167          used_unlocked(), recalculate_used());
5168 }
5169 
5170 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5171   _refine_cte_cl->set_concurrent(concurrent);
5172 }
5173 
5174 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5175   HeapRegion* hr = heap_region_containing(p);
5176   return hr->is_in(p);
5177 }
5178 
5179 // Methods for the mutator alloc region
5180 
5181 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5182                                                       bool force) {
5183   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5184   bool should_allocate = g1_policy()->should_allocate_mutator_region();
5185   if (force || should_allocate) {
5186     HeapRegion* new_alloc_region = new_region(word_size,
5187                                               false /* is_old */,
5188                                               false /* do_expand */);
5189     if (new_alloc_region != NULL) {
5190       set_region_short_lived_locked(new_alloc_region);
5191       _hr_printer.alloc(new_alloc_region, !should_allocate);


--- New version ---


  78 #include "prims/resolvedMethodTable.hpp"
  79 #include "runtime/atomic.hpp"
  80 #include "runtime/init.hpp"
  81 #include "runtime/orderAccess.inline.hpp"
  82 #include "runtime/vmThread.hpp"
  83 #include "utilities/align.hpp"
  84 #include "utilities/globalDefinitions.hpp"
  85 #include "utilities/stack.inline.hpp"
  86 
  87 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  88 
  89 // INVARIANTS/NOTES
  90 //
  91 // All allocation activity covered by the G1CollectedHeap interface is
  92 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  93 // and allocate_new_tlab, which are the "entry" points to the
  94 // allocation code from the rest of the JVM.  (Note that this does not
  95 // apply to TLAB allocation, which is not part of this interface: it
  96 // is done by clients of this interface.)
  97 
  98 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  99  private:
 100   size_t _num_dirtied;
 101   G1CollectedHeap* _g1h;
 102   G1SATBCardTableLoggingModRefBS* _g1_bs;
 103 
 104   HeapRegion* region_for_card(jbyte* card_ptr) const {
 105     return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
 106   }
 107 
 108   bool will_become_free(HeapRegion* hr) const {
 109     // A region will be freed by free_collection_set if the region is in the
 110     // collection set and has not had an evacuation failure.
 111     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 112   }
 113 
 114  public:
 115   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
 116     _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 117 


1543   _verifier->verify_region_sets_optional();
1544 }
1545 
1546 // Public methods.
1547 
1548 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1549   CollectedHeap(),
1550   _collector_policy(collector_policy),
1551   _g1_policy(create_g1_policy()),
1552   _collection_set(this, _g1_policy),
1553   _dirty_card_queue_set(false),
1554   _is_alive_closure_cm(this),
1555   _is_alive_closure_stw(this),
1556   _ref_processor_cm(NULL),
1557   _ref_processor_stw(NULL),
1558   _bot(NULL),
1559   _hot_card_cache(NULL),
1560   _g1_rem_set(NULL),
1561   _cg1r(NULL),
1562   _g1mm(NULL),

1563   _preserved_marks_set(true /* in_c_heap */),
1564   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1565   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1566   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1567   _humongous_reclaim_candidates(),
1568   _has_humongous_reclaim_candidates(false),
1569   _archive_allocator(NULL),
1570   _free_regions_coming(false),
1571   _gc_time_stamp(0),
1572   _summary_bytes_used(0),
1573   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1574   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1575   _expand_heap_after_alloc_failure(true),
1576   _old_marking_cycles_started(0),
1577   _old_marking_cycles_completed(0),
1578   _in_cset_fast_test(),
1579   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1580   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1581 
1582   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1621   ReservedSpace rs(size, preferred_page_size);
1622   G1RegionToSpaceMapper* result  =
1623     G1RegionToSpaceMapper::create_mapper(rs,
1624                                          size,
1625                                          rs.alignment(),
1626                                          HeapRegion::GrainBytes,
1627                                          translation_factor,
1628                                          mtGC);
1629 
1630   os::trace_page_sizes_for_requested_size(description,
1631                                           size,
1632                                           preferred_page_size,
1633                                           rs.alignment(),
1634                                           rs.base(),
1635                                           rs.size());
1636 
1637   return result;
1638 }
1639 
1640 jint G1CollectedHeap::initialize_concurrent_refinement() {


1641   jint ecode = JNI_OK;
1642   _cg1r = ConcurrentG1Refine::create(&ecode);
1643   return ecode;
1644 }
1645 
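In the patched version the RefineCardTableEntryClosure is gone from
this file entirely: ConcurrentG1Refine::create() now builds its own
closure, so G1CollectedHeap no longer keeps a _refine_cte_cl field and
set_refine_cte_cl_concurrency() disappears further down. A sketch of
that ownership move, with stand-in types:

  struct Closure {
    bool _concurrent;
    Closure() : _concurrent(true) {}
  };

  struct Refiner {
    Closure* _cl;

    explicit Refiner(Closure* cl) : _cl(cl) {}
    ~Refiner() { delete _cl; }

    // Before: callers constructed the closure and passed it in.
    // After: create() owns construction, so no caller ever needs to
    // hold a reference to the closure.
    static Refiner* create(int* ecode) {
      *ecode = 0;
      return new Refiner(new Closure());
    }
  };
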
1646 jint G1CollectedHeap::initialize() {
1647   CollectedHeap::pre_initialize();
1648   os::enable_vtime();
1649 
1650   // Necessary to satisfy locking discipline assertions.
1651 
1652   MutexLocker x(Heap_lock);
1653 
1654   // While there are no constraints in the GC code that HeapWordSize
1655   // be any particular value, there are multiple other areas in the
1656   // system which believe this to be true (e.g. oop->object_size in some
1657   // cases incorrectly returns the size in wordSize units rather than
1658   // HeapWordSize).
1659   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1660 
1661   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1662   size_t max_byte_size = collector_policy()->max_heap_byte_size();


1779 
1780   // Now expand into the initial heap size.
1781   if (!expand(init_byte_size, _workers)) {
1782     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1783     return JNI_ENOMEM;
1784   }
1785 
1786   // Perform any initialization actions delegated to the policy.
1787   g1_policy()->init(this, &_collection_set);
1788 
1789   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1790                                                SATB_Q_FL_lock,
1791                                                G1SATBProcessCompletedThreshold,
1792                                                Shared_SATB_Q_lock);
1793 
1794   jint ecode = initialize_concurrent_refinement();
1795   if (ecode != JNI_OK) {
1796     return ecode;
1797   }
1798   
1799   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,

1800                                                 DirtyCardQ_FL_lock,
1801                                                 (int)concurrent_g1_refine()->yellow_zone(),
1802                                                 (int)concurrent_g1_refine()->red_zone(),
1803                                                 Shared_DirtyCardQ_lock,
1804                                                 NULL,  // fl_owner
1805                                                 true); // init_free_ids
1806 
1807   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,

1808                                     DirtyCardQ_FL_lock,
1809                                     -1, // never trigger processing
1810                                     -1, // no limit on length
1811                                     Shared_DirtyCardQ_lock,
1812                                     &JavaThread::dirty_card_queue_set());
1813 
1814   // Here we allocate the dummy HeapRegion that is required by the
1815   // G1AllocRegion class.
1816   HeapRegion* dummy_region = _hrm.get_dummy_region();
1817 
1818   // We'll re-use the same region whether the alloc region will
1819   // require BOT updates or not and, if it doesn't, then a non-young
1820   // region will complain that it cannot support allocations without
1821   // BOT updates. So we'll tag the dummy region as eden to avoid that.
1822   dummy_region->set_eden();
1823   // Make sure it's full.
1824   dummy_region->set_top(dummy_region->end());
1825   G1AllocRegion::setup(this, dummy_region);
1826 
1827   _allocator->init_mutator_alloc_region();


1964     return false;
1965   }
1966 
1967   bool failures() { return _failures; }
1968 };
1969 
1970 void G1CollectedHeap::check_gc_time_stamps() {
1971   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
1972   heap_region_iterate(&cl);
1973   guarantee(!cl.failures(), "all GC time stamps should have been reset");
1974 }
1975 #endif // PRODUCT
1976 
1977 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1978   _hot_card_cache->drain(cl, worker_i);
1979 }
1980 
1981 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1982   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1983   size_t n_completed_buffers = 0;
1984   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1985     n_completed_buffers++;
1986   }
1987   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
1988   dcqs.clear_n_completed_buffers();
1989   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1990 }
1991 
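The only change in this method is the call that takes one completed
buffer: apply_closure_to_completed_buffer(cl, worker_i, 0, true)
becomes the more specific apply_closure_during_gc(cl, worker_i). The
drain-and-count shape is unchanged; a standalone sketch of it with
hypothetical names:

  #include <cstddef>

  struct BufferSource {
    // Processes one completed buffer; false when none are left.
    virtual bool apply_one(unsigned int worker_id) = 0;
    virtual ~BufferSource() {}
  };

  // Drains every completed buffer and returns how many this worker
  // handled, so the caller can record a per-worker work item (as
  // record_thread_work_item does above).
  size_t drain_all(BufferSource& source, unsigned int worker_id) {
    size_t n = 0;
    while (source.apply_one(worker_id)) {
      n++;
    }
    return n;
  }
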
1992 // Computes the sum of the storage used by the various regions.
1993 size_t G1CollectedHeap::used() const {
1994   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1995   if (_archive_allocator != NULL) {
1996     result += _archive_allocator->used();
1997   }
1998   return result;
1999 }
2000 
2001 size_t G1CollectedHeap::used_unlocked() const {
2002   return _summary_bytes_used;
2003 }
2004 


5121   assert_at_safepoint(true /* should_be_vm_thread */);
5122 
5123   if (!free_list_only) {
5124     _eden.clear();
5125     _survivor.clear();
5126   }
5127 
5128   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
5129   heap_region_iterate(&cl);
5130 
5131   if (!free_list_only) {
5132     set_used(cl.total_used());
5133     if (_archive_allocator != NULL) {
5134       _archive_allocator->clear_used();
5135     }
5136   }
5137   assert(used_unlocked() == recalculate_used(),
5138          "inconsistent used_unlocked(), "
5139          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
5140          used_unlocked(), recalculate_used());




5141 }
5142 
5143 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5144   HeapRegion* hr = heap_region_containing(p);
5145   return hr->is_in(p);
5146 }
5147 
5148 // Methods for the mutator alloc region
5149 
5150 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5151                                                       bool force) {
5152   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5153   bool should_allocate = g1_policy()->should_allocate_mutator_region();
5154   if (force || should_allocate) {
5155     HeapRegion* new_alloc_region = new_region(word_size,
5156                                               false /* is_old */,
5157                                               false /* do_expand */);
5158     if (new_alloc_region != NULL) {
5159       set_region_short_lived_locked(new_alloc_region);
5160       _hr_printer.alloc(new_alloc_region, !should_allocate);

