
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 13232 : imported patch parallel-fullgc-stefanj
rev 13237 : imported patch 8183226-periodic-rem-set-summary-accesses-uninitialized-stuff
rev 13238 : imported patch 8183226-eridk-sjohanss-review
rev 13239 : imported patch 8183128-cleanup-refinecardtableentryclosure


  78 #include "prims/resolvedMethodTable.hpp"
  79 #include "runtime/atomic.hpp"
  80 #include "runtime/init.hpp"
  81 #include "runtime/orderAccess.inline.hpp"
  82 #include "runtime/vmThread.hpp"
  83 #include "utilities/align.hpp"
  84 #include "utilities/globalDefinitions.hpp"
  85 #include "utilities/stack.inline.hpp"
  86 
  87 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  88 
  89 // INVARIANTS/NOTES
  90 //
  91 // All allocation activity covered by the G1CollectedHeap interface is
  92 // serialized by acquiring the Heap_lock.  This happens in mem_allocate
  93 // and allocate_new_tlab, which are the "entry" points to the
  94 // allocation code from the rest of the JVM.  (Note that this does not
  95 // apply to allocation within an existing TLAB, which is not part of
  96 // this interface: it is performed by the clients of this interface.)
  97 
  98 // Local to this file.
  99 
 100 class RefineCardTableEntryClosure: public CardTableEntryClosure {
 101   bool _concurrent;
 102 public:
 103   RefineCardTableEntryClosure() : _concurrent(true) { }
 104 
 105   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 106     G1CollectedHeap::heap()->g1_rem_set()->refine_card_concurrently(card_ptr, worker_i);
 107 
 108     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 109       // Caller will actually yield.
 110       return false;
 111     }
 112     // Otherwise, we finished successfully; return true.
 113     return true;
 114   }
 115 
 116   void set_concurrent(bool b) { _concurrent = b; }
 117 };
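// [Illustrative sketch, not from this patch] The closure above is driven
// by the concurrent refinement threads, which drain completed dirty-card
// buffers and honor the "return false" yield request roughly like this
// (signatures approximate for this vintage of the code):
//
//   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
//   RefineCardTableEntryClosure cl;
//   while (dcqs.apply_closure_to_completed_buffer(&cl, worker_i, stop_at)) {
//     if (SuspendibleThreadSet::should_yield()) {
//       SuspendibleThreadSet::yield(); // blocks here while a safepoint runs
//     }
//   }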
 118 
 119 
 120 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 121  private:
 122   size_t _num_dirtied;
 123   G1CollectedHeap* _g1h;
 124   G1SATBCardTableLoggingModRefBS* _g1_bs;
 125 
 126   HeapRegion* region_for_card(jbyte* card_ptr) const {
 127     return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
 128   }
 129 
 130   bool will_become_free(HeapRegion* hr) const {
 131     // A region will be freed by free_collection_set if the region is in the
 132     // collection set and has not had an evacuation failure.
 133     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 134   }
 135 
 136  public:
 137   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
 138     _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 139 


1565   _verifier->verify_region_sets_optional();
1566 }
1567 
1568 // Public methods.
1569 
1570 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1571   CollectedHeap(),
1572   _collector_policy(collector_policy),
1573   _g1_policy(create_g1_policy()),
1574   _collection_set(this, _g1_policy),
1575   _dirty_card_queue_set(false),
1576   _is_alive_closure_cm(this),
1577   _is_alive_closure_stw(this),
1578   _ref_processor_cm(NULL),
1579   _ref_processor_stw(NULL),
1580   _bot(NULL),
1581   _hot_card_cache(NULL),
1582   _g1_rem_set(NULL),
1583   _cg1r(NULL),
1584   _g1mm(NULL),
1585   _refine_cte_cl(NULL),
1586   _preserved_marks_set(true /* in_c_heap */),
1587   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1588   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1589   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1590   _humongous_reclaim_candidates(),
1591   _has_humongous_reclaim_candidates(false),
1592   _archive_allocator(NULL),
1593   _free_regions_coming(false),
1594   _gc_time_stamp(0),
1595   _summary_bytes_used(0),
1596   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1597   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1598   _expand_heap_after_alloc_failure(true),
1599   _old_marking_cycles_started(0),
1600   _old_marking_cycles_completed(0),
1601   _in_cset_fast_test(),
1602   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1603   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1604 
1605   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1644   ReservedSpace rs(size, preferred_page_size);
1645   G1RegionToSpaceMapper* result =
1646     G1RegionToSpaceMapper::create_mapper(rs,
1647                                          size,
1648                                          rs.alignment(),
1649                                          HeapRegion::GrainBytes,
1650                                          translation_factor,
1651                                          mtGC);
1652 
1653   os::trace_page_sizes_for_requested_size(description,
1654                                           size,
1655                                           preferred_page_size,
1656                                           rs.alignment(),
1657                                           rs.base(),
1658                                           rs.size());
1659 
1660   return result;
1661 }
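// [Illustrative sketch, not from this patch] translation_factor expresses
// how many heap bytes each byte of the auxiliary structure covers; a
// hypothetical sizing helper would look like:
//
//   size_t auxiliary_bytes(size_t heap_bytes, size_t translation_factor) {
//     // e.g. the card table covers 512 heap bytes per table byte
//     return align_up(heap_bytes / translation_factor, os::vm_page_size());
//   }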
1662 
1663 jint G1CollectedHeap::initialize_concurrent_refinement() {
1664   _refine_cte_cl = new RefineCardTableEntryClosure();
1665 
1666   jint ecode = JNI_OK;
1667   _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
1668   return ecode;
1669 }
1670 
1671 jint G1CollectedHeap::initialize() {
1672   CollectedHeap::pre_initialize();
1673   os::enable_vtime();
1674 
1675   // Necessary to satisfy locking discipline assertions.
1676 
1677   MutexLocker x(Heap_lock);
1678 
1679   // While there are no constraints in the GC code that HeapWordSize
1680   // be any particular value, there are multiple other places in the
1681   // system which assume that HeapWordSize == wordSize (e.g.
1682   // oop->object_size in some cases incorrectly returns the size in
1683   // wordSize units rather than HeapWordSize).
1684   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1685 
1686   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1687   size_t max_byte_size = collector_policy()->max_heap_byte_size();


1804 
1805   // Now expand into the initial heap size.
1806   if (!expand(init_byte_size, _workers)) {
1807     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1808     return JNI_ENOMEM;
1809   }
1810 
1811   // Perform any initialization actions delegated to the policy.
1812   g1_policy()->init(this, &_collection_set);
1813 
1814   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1815                                                SATB_Q_FL_lock,
1816                                                G1SATBProcessCompletedThreshold,
1817                                                Shared_SATB_Q_lock);
1818 
1819   jint ecode = initialize_concurrent_refinement();
1820   if (ecode != JNI_OK) {
1821     return ecode;
1822   }
1823
1824   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1825                                                 DirtyCardQ_CBL_mon,
1826                                                 DirtyCardQ_FL_lock,
1827                                                 (int)concurrent_g1_refine()->yellow_zone(),
1828                                                 (int)concurrent_g1_refine()->red_zone(),
1829                                                 Shared_DirtyCardQ_lock,
1830                                                 NULL,  // fl_owner
1831                                                 true); // init_free_ids
1832 
1833   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1834                                     DirtyCardQ_CBL_mon,
1835                                     DirtyCardQ_FL_lock,
1836                                     -1, // never trigger processing
1837                                     -1, // no limit on length
1838                                     Shared_DirtyCardQ_lock,
1839                                     &JavaThread::dirty_card_queue_set());
1840 
1841   // Here we allocate the dummy HeapRegion that is required by the
1842   // G1AllocRegion class.
1843   HeapRegion* dummy_region = _hrm.get_dummy_region();
1844 
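// [Illustrative sketch, not from this patch] The yellow/red zone values
// passed to the queue-set initialization above act as activation
// thresholds on the number of completed buffers (the actual policy lives
// in ConcurrentG1Refine; this is an assumption about its shape):
//
//   if (completed_buffers > red_zone) {
//     // mutator threads must refine their own buffers before continuing
//   } else if (completed_buffers > yellow_zone) {
//     // all concurrent refinement threads are activated
//   }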


5148   assert_at_safepoint(true /* should_be_vm_thread */);
5149 
5150   if (!free_list_only) {
5151     _eden.clear();
5152     _survivor.clear();
5153   }
5154 
5155   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
5156   heap_region_iterate(&cl);
5157 
5158   if (!free_list_only) {
5159     set_used(cl.total_used());
5160     if (_archive_allocator != NULL) {
5161       _archive_allocator->clear_used();
5162     }
5163   }
5164   assert(used_unlocked() == recalculate_used(),
5165          "inconsistent used_unlocked(), "
5166          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
5167          used_unlocked(), recalculate_used());
5168 }
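// [Illustrative sketch, not from this patch] RebuildRegionSetsClosure's
// body is not part of this hunk; a rebuild closure would plausibly return
// empty regions to the free list and, unless only the free list is being
// rebuilt, re-add old regions to the old set while accumulating used
// bytes (an assumption, not the patch's actual code):
//
//   bool doHeapRegion(HeapRegion* r) {
//     if (r->is_empty()) {
//       _hrm->insert_into_free_list(r); // empty regions -> free list
//     } else if (!_free_list_only) {
//       _total_used += r->used();
//       if (r->is_old()) {
//         _old_set->add(r);             // old regions -> old set
//       }
//     }
//     return false;                     // false means keep iterating
//   }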
5169 
5170 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5171   _refine_cte_cl->set_concurrent(concurrent);
5172 }
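// [Illustrative sketch, not from this patch] Typical use of the setter
// above (an assumption about the call sites): the flag is cleared on
// entry to a stop-the-world operation so the refinement closure never
// asks its caller to yield, and is restored once the pause is over:
//
//   set_refine_cte_cl_concurrency(false); // entering a safepoint operation
//   // ... drain dirty-card queues without yielding ...
//   set_refine_cte_cl_concurrency(true);  // concurrent refinement resumes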
5173 
5174 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5175   HeapRegion* hr = heap_region_containing(p);
5176   return hr->is_in(p);
5177 }
5178 
5179 // Methods for the mutator alloc region
5180 
5181 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5182                                                       bool force) {
5183   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5184   bool should_allocate = g1_policy()->should_allocate_mutator_region();
5185   if (force || should_allocate) {
5186     HeapRegion* new_alloc_region = new_region(word_size,
5187                                               false /* is_old */,
5188                                               false /* do_expand */);
5189     if (new_alloc_region != NULL) {
5190       set_region_short_lived_locked(new_alloc_region);
5191       _hr_printer.alloc(new_alloc_region, !should_allocate);




  78 #include "prims/resolvedMethodTable.hpp"
  79 #include "runtime/atomic.hpp"
  80 #include "runtime/init.hpp"
  81 #include "runtime/orderAccess.inline.hpp"
  82 #include "runtime/vmThread.hpp"
  83 #include "utilities/align.hpp"
  84 #include "utilities/globalDefinitions.hpp"
  85 #include "utilities/stack.inline.hpp"
  86 
  87 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  88 
  89 // INVARIANTS/NOTES
  90 //
  91 // All allocation activity covered by the G1CollectedHeap interface is
  92 // serialized by acquiring the Heap_lock.  This happens in mem_allocate
  93 // and allocate_new_tlab, which are the "entry" points to the
  94 // allocation code from the rest of the JVM.  (Note that this does not
  95 // apply to allocation within an existing TLAB, which is not part of
  96 // this interface: it is performed by the clients of this interface.)
  97 

  98 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  99  private:
 100   size_t _num_dirtied;
 101   G1CollectedHeap* _g1h;
 102   G1SATBCardTableLoggingModRefBS* _g1_bs;
 103 
 104   HeapRegion* region_for_card(jbyte* card_ptr) const {
 105     return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
 106   }
 107 
 108   bool will_become_free(HeapRegion* hr) const {
 109     // A region will be freed by free_collection_set if the region is in the
 110     // collection set and has not had an evacuation failure.
 111     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 112   }
 113 
 114  public:
 115   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
 116     _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
 117 


1543   _verifier->verify_region_sets_optional();
1544 }
1545 
1546 // Public methods.
1547 
1548 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1549   CollectedHeap(),
1550   _collector_policy(collector_policy),
1551   _g1_policy(create_g1_policy()),
1552   _collection_set(this, _g1_policy),
1553   _dirty_card_queue_set(false),
1554   _is_alive_closure_cm(this),
1555   _is_alive_closure_stw(this),
1556   _ref_processor_cm(NULL),
1557   _ref_processor_stw(NULL),
1558   _bot(NULL),
1559   _hot_card_cache(NULL),
1560   _g1_rem_set(NULL),
1561   _cg1r(NULL),
1562   _g1mm(NULL),

1563   _preserved_marks_set(true /* in_c_heap */),
1564   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1565   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1566   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1567   _humongous_reclaim_candidates(),
1568   _has_humongous_reclaim_candidates(false),
1569   _archive_allocator(NULL),
1570   _free_regions_coming(false),
1571   _gc_time_stamp(0),
1572   _summary_bytes_used(0),
1573   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1574   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1575   _expand_heap_after_alloc_failure(true),
1576   _old_marking_cycles_started(0),
1577   _old_marking_cycles_completed(0),
1578   _in_cset_fast_test(),
1579   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1580   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1581 
1582   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1621   ReservedSpace rs(size, preferred_page_size);
1622   G1RegionToSpaceMapper* result =
1623     G1RegionToSpaceMapper::create_mapper(rs,
1624                                          size,
1625                                          rs.alignment(),
1626                                          HeapRegion::GrainBytes,
1627                                          translation_factor,
1628                                          mtGC);
1629 
1630   os::trace_page_sizes_for_requested_size(description,
1631                                           size,
1632                                           preferred_page_size,
1633                                           rs.alignment(),
1634                                           rs.base(),
1635                                           rs.size());
1636 
1637   return result;
1638 }
1639 
1640 jint G1CollectedHeap::initialize_concurrent_refinement() {


1641   jint ecode = JNI_OK;
1642   _cg1r = ConcurrentG1Refine::create(_g1_rem_set->refine_card_concurrently_closure(), &ecode);
1643   return ecode;
1644 }
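// [Illustrative sketch, not from this patch] The accessor called above is
// assumed to hand out a closure that G1RemSet now owns (member name
// hypothetical):
//
//   CardTableEntryClosure* G1RemSet::refine_card_concurrently_closure() {
//     return &_refine_card_concurrently_closure;
//   }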
1645 
1646 jint G1CollectedHeap::initialize() {
1647   CollectedHeap::pre_initialize();
1648   os::enable_vtime();
1649 
1650   // Necessary to satisfy locking discipline assertions.
1651 
1652   MutexLocker x(Heap_lock);
1653 
1654   // While there are no constraints in the GC code that HeapWordSize
1655   // be any particular value, there are multiple other places in the
1656   // system which assume that HeapWordSize == wordSize (e.g.
1657   // oop->object_size in some cases incorrectly returns the size in
1658   // wordSize units rather than HeapWordSize).
1659   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1660 
1661   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1662   size_t max_byte_size = collector_policy()->max_heap_byte_size();


1779 
1780   // Now expand into the initial heap size.
1781   if (!expand(init_byte_size, _workers)) {
1782     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1783     return JNI_ENOMEM;
1784   }
1785 
1786   // Perform any initialization actions delegated to the policy.
1787   g1_policy()->init(this, &_collection_set);
1788 
1789   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1790                                                SATB_Q_FL_lock,
1791                                                G1SATBProcessCompletedThreshold,
1792                                                Shared_SATB_Q_lock);
1793 
1794   jint ecode = initialize_concurrent_refinement();
1795   if (ecode != JNI_OK) {
1796     return ecode;
1797   }
1798
1799   JavaThread::dirty_card_queue_set().initialize(_g1_rem_set->refine_card_concurrently_closure(),
1800                                                 DirtyCardQ_CBL_mon,
1801                                                 DirtyCardQ_FL_lock,
1802                                                 (int)concurrent_g1_refine()->yellow_zone(),
1803                                                 (int)concurrent_g1_refine()->red_zone(),
1804                                                 Shared_DirtyCardQ_lock,
1805                                                 NULL,  // fl_owner
1806                                                 true); // init_free_ids
1807 
1808   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1809                                     DirtyCardQ_CBL_mon,
1810                                     DirtyCardQ_FL_lock,
1811                                     -1, // never trigger processing
1812                                     -1, // no limit on length
1813                                     Shared_DirtyCardQ_lock,
1814                                     &JavaThread::dirty_card_queue_set());
1815 
1816   // Here we allocate the dummy HeapRegion that is required by the
1817   // G1AllocRegion class.
1818   HeapRegion* dummy_region = _hrm.get_dummy_region();
1819 


5123   assert_at_safepoint(true /* should_be_vm_thread */);
5124 
5125   if (!free_list_only) {
5126     _eden.clear();
5127     _survivor.clear();
5128   }
5129 
5130   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
5131   heap_region_iterate(&cl);
5132 
5133   if (!free_list_only) {
5134     set_used(cl.total_used());
5135     if (_archive_allocator != NULL) {
5136       _archive_allocator->clear_used();
5137     }
5138   }
5139   assert(used_unlocked() == recalculate_used(),
5140          "inconsistent used_unlocked(), "
5141          "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
5142          used_unlocked(), recalculate_used());

5143 }
5144 
5145 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5146   HeapRegion* hr = heap_region_containing(p);
5147   return hr->is_in(p);
5148 }
5149 
5150 // Methods for the mutator alloc region
5151 
5152 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5153                                                       bool force) {
5154   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5155   bool should_allocate = g1_policy()->should_allocate_mutator_region();
5156   if (force || should_allocate) {
5157     HeapRegion* new_alloc_region = new_region(word_size,
5158                                               false /* is_old */,
5159                                               false /* do_expand */);
5160     if (new_alloc_region != NULL) {
5161       set_region_short_lived_locked(new_alloc_region);
5162       _hr_printer.alloc(new_alloc_region, !should_allocate);

