src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

Print this page
rev 6323 : 8027553: Change the in_cset_fast_test functionality to use the G1BiasedArray abstraction
Summary: Instead of using a manually managed array for the in_cset_fast_test array, use a G1BiasedArray instance.
Reviewed-by: brutisso, mgerdin
rev 6326 : 8028710: G1 does not retire allocation buffers after reference processing work
Summary: G1 does not retire allocation buffers after reference processing work when -XX:+ParallelRefProcEnabled is enabled. This causes wrong calculation of PLAB sizes, as the amount of space wasted is not updated correctly.
Reviewed-by: brutisso
rev 6334 : 8035400: Move G1ParScanThreadState into its own files
Summary: Extract the G1ParScanThreadState class from G1CollectedHeap.hpp/.cpp into its own files.
Reviewed-by: brutisso, mgerdin


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc_implementation/g1/concurrentMark.hpp"
  29 #include "gc_implementation/g1/evacuationInfo.hpp"
  30 #include "gc_implementation/g1/g1AllocRegion.hpp"
  31 #include "gc_implementation/g1/g1BiasedArray.hpp"
  32 #include "gc_implementation/g1/g1HRPrinter.hpp"
  33 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
  34 #include "gc_implementation/g1/g1RemSet.hpp"
  35 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  36 #include "gc_implementation/g1/g1YCTypes.hpp"
  37 #include "gc_implementation/g1/heapRegionSeq.hpp"
  38 #include "gc_implementation/g1/heapRegionSet.hpp"
  39 #include "gc_implementation/shared/hSpaceCounters.hpp"
  40 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  41 #include "memory/barrierSet.hpp"
  42 #include "memory/memRegion.hpp"
  43 #include "memory/sharedHeap.hpp"
  44 #include "utilities/stack.hpp"
  45 
  46 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  47 // It uses the "Garbage First" heap organization and algorithm, which
  48 // may combine concurrent marking with parallel, incremental compaction of
  49 // heap subsets that will yield large amounts of garbage.
  50 
  51 // Forward declarations
  52 class HeapRegion;
  53 class HRRSCleanupTask;
  54 class GenerationSpec;


1692 public:
1693   G1ParGCAllocBuffer(size_t gclab_word_size);
// Destruction is only legal after the buffer has been retired; an
// unretired buffer would mean its remaining space was never accounted
// for in the PLAB statistics (see 8028710 in the changeset notes above).
1694   virtual ~G1ParGCAllocBuffer() {
1695     guarantee(_retired, "Allocation buffer has not been retired");
1696   }
1697 
// Installs a new backing buffer via the base class and marks this
// allocation buffer as live (not retired) again.
1698   virtual void set_buf(HeapWord* buf) {
1699     ParGCAllocBuffer::set_buf(buf);
1700     _retired = false;
1701   }
1702 
// Retires the buffer at most once: calls after the first are no-ops,
// so an explicit retire followed by destruction is safe.
1703   virtual void retire(bool end_of_gc, bool retain) {
1704     if (_retired) {
1705       return;
1706     }
1707     ParGCAllocBuffer::retire(end_of_gc, retain);
1708     _retired = true;
1709   }
1710 };
1711 
// Per-GC-worker state used during G1 evacuation work (per 8035400 in the
// changeset notes above, this class was subsequently moved into its own
// files). It bundles the worker's task queue, dirty card queue,
// per-purpose allocation buffers (PLABs), and timing/waste statistics.
1712 class G1ParScanThreadState : public StackObj {
1713 protected:
1714   G1CollectedHeap* _g1h;
     // Queue of references this worker still has to scan.
1715   RefToScanQueue*  _refs;
1716   DirtyCardQueue   _dcq;
1717   G1SATBCardTableModRefBS* _ct_bs;
1718   G1RemSet* _g1_rem;
1719 
     // One allocation buffer per allocation purpose; _alloc_buffers maps
     // GCAllocPurpose values onto the two concrete buffers above.
1720   G1ParGCAllocBuffer  _surviving_alloc_buffer;
1721   G1ParGCAllocBuffer  _tenured_alloc_buffer;
1722   G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1723   ageTable            _age_table;
1724 
1725   G1ParScanClosure    _scanner;
1726 
     // Words wasted by retiring partially-used buffers and by undone
     // allocations, respectively.
1727   size_t           _alloc_buffer_waste;
1728   size_t           _undo_waste;
1729 
1730   OopsInHeapRegionClosure*      _evac_failure_cl;
1731 
1732   int  _hash_seed;
1733   uint _queue_num;
1734 
     // Number of times this worker entered the termination protocol.
1735   size_t _term_attempts;
1736 
     // Elapsed-time bookkeeping (seconds, from os::elapsedTime()) for the
     // strong-roots and termination phases.
1737   double _start;
1738   double _start_strong_roots;
1739   double _strong_roots_time;
1740   double _start_term;
1741   double _term_time;
1742 
1743   // Map from young-age-index (0 == not young, 1 is youngest) to
1744   // surviving words. base is what we get back from the malloc call
1745   size_t* _surviving_young_words_base;
1746   // this points into the array, as we use the first few entries for padding
1747   size_t* _surviving_young_words;
1748 
1749 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1750 
1751   void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1752 
1753   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
1754 
1755   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
1756   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
1757 
     // Immediate remembered-set update for the reference at p; definition
     // lives outside this header (declared inline).
1758   template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
1759 
     // Deferred remembered-set update: marks the card containing p and
     // enqueues it on this worker's dirty card queue for later processing.
1760   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1761     // If the new value of the field points to the same region or
1762     // is the to-space, we don't need to include it in the Rset updates.
1763     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1764       size_t card_index = ctbs()->index_for(p);
1765       // If the card hasn't been added to the buffer, do it.
1766       if (ctbs()->mark_card_deferred(card_index)) {
1767         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1768       }
1769     }
1770   }
1771 
1772 public:
1773   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1774 
     // Retires all allocation buffers (flushing their statistics) and
     // releases the surviving-young-words array allocated on the C heap.
1775   ~G1ParScanThreadState() {
1776     retire_alloc_buffers();
1777     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1778   }
1779 
1780   RefToScanQueue*   refs()            { return _refs;             }
1781   ageTable*         age_table()       { return &_age_table;       }
1782 
1783   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1784     return _alloc_buffers[purpose];
1785   }
1786 
1787   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1788   size_t undo_waste() const                      { return _undo_waste; }
1789 
1790 #ifdef ASSERT
1791   bool verify_ref(narrowOop* ref) const;
1792   bool verify_ref(oop* ref) const;
1793   bool verify_task(StarTask ref) const;
1794 #endif // ASSERT
1795 
1796   template <class T> void push_on_queue(T* ref) {
1797     assert(verify_ref(ref), "sanity");
1798     refs()->push(ref);
1799   }
1800 
     // Remembered-set update for p; presumably dispatches to
     // immediate_rs_update() or deferred_rs_update() — definition is
     // outside this header, so confirm there.
1801   template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
1802 
     // Slow-path allocation: if the request is small relative to the
     // desired PLAB size (ParallelGCBufferWastePct), retire the current
     // buffer (counting its remaining words as waste) and install a fresh
     // one; otherwise allocate the object directly outside any buffer.
1803   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1804     HeapWord* obj = NULL;
1805     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1806     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1807       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1808       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1809       alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1810 
1811       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1812       if (buf == NULL) return NULL; // Let caller handle allocation failure.
1813       // Otherwise.
1814       alloc_buf->set_word_size(gclab_word_size);
1815       alloc_buf->set_buf(buf);
1816 
1817       obj = alloc_buf->allocate(word_sz);
1818       assert(obj != NULL, "buffer was definitely big enough...");
1819     } else {
1820       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1821     }
1822     return obj;
1823   }
1824 
     // Fast-path allocation from the current buffer; falls back to
     // allocate_slow() when the buffer cannot satisfy the request.
1825   HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1826     HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1827     if (obj != NULL) return obj;
1828     return allocate_slow(purpose, word_sz);
1829   }
1830 
     // Undoes an allocation: returns the space to the buffer if it came
     // from there; otherwise fills it with a dummy object (the space
     // cannot be reclaimed) and counts it as undo waste.
1831   void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1832     if (alloc_buffer(purpose)->contains(obj)) {
1833       assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1834              "should contain whole object");
1835       alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1836     } else {
1837       CollectedHeap::fill_with_object(obj, word_sz);
1838       add_to_undo_waste(word_sz);
1839     }
1840   }
1841 
1842   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1843     _evac_failure_cl = evac_failure_cl;
1844   }
1845   OopsInHeapRegionClosure* evac_failure_closure() {
1846     return _evac_failure_cl;
1847   }
1848 
1849   int* hash_seed() { return &_hash_seed; }
1850   uint queue_num() { return _queue_num; }
1851 
1852   size_t term_attempts() const  { return _term_attempts; }
1853   void note_term_attempt() { _term_attempts++; }
1854 
     // Phase timing helpers: each start_*/end_* pair brackets a phase and
     // accumulates its wall-clock duration.
1855   void start_strong_roots() {
1856     _start_strong_roots = os::elapsedTime();
1857   }
1858   void end_strong_roots() {
1859     _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
1860   }
1861   double strong_roots_time() const { return _strong_roots_time; }
1862 
1863   void start_term_time() {
1864     note_term_attempt();
1865     _start_term = os::elapsedTime();
1866   }
1867   void end_term_time() {
1868     _term_time += (os::elapsedTime() - _start_term);
1869   }
1870   double term_time() const { return _term_time; }
1871 
1872   double elapsed_time() const {
1873     return os::elapsedTime() - _start;
1874   }
1875 
     // Prints the termination-statistics header / one worker's data row
     // (defaults to the GC log stream).
1876   static void
1877     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1878   void
1879     print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1880 
1881   size_t* surviving_young_words() {
1882     // We add on to hide entry 0 which accumulates surviving words for
1883     // age -1 regions (i.e. non-young ones)
1884     return _surviving_young_words;
1885   }
1886 
1887 private:
     // Retires every allocation buffer at end of GC, recording remaining
     // space as waste and flushing per-purpose PLAB statistics.
1888   void retire_alloc_buffers() {
1889     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1890       size_t waste = _alloc_buffers[ap]->words_remaining();
1891       add_to_alloc_buffer_waste(waste);
1892       _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1893                                                  true /* end_of_gc */,
1894                                                  false /* retain */);
1895     }
1896   }
1897 
     // Tag bit used to mark task-queue entries that represent a
     // partially-scanned object array rather than a plain oop reference.
1898 #define G1_PARTIAL_ARRAY_MASK 0x2
1899 
1900   inline bool has_partial_array_mask(oop* ref) const {
1901     return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
1902   }
1903 
1904   // We never encode partial array oops as narrowOop*, so return false immediately.
1905   // This allows the compiler to create optimized code when popping references from
1906   // the work queue.
1907   inline bool has_partial_array_mask(narrowOop* ref) const {
1908     assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
1909     return false;
1910   }
1911 
1912   // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
1913   // We always encode partial arrays as regular oop, to allow the
1914   // specialization for has_partial_array_mask() for narrowOops above.
1915   // This means that unintentional use of this method with narrowOops are caught
1916   // by the compiler.
1917   inline oop* set_partial_array_mask(oop obj) const {
1918     assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
1919     return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
1920   }
1921 
1922   inline oop clear_partial_array_mask(oop* ref) const {
1923     return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
1924   }
1925 
1926   inline void do_oop_partial_array(oop* p);
1927 
1928   // This method is applied to the fields of the objects that have just been copied.
1929   template <class T> void do_oop_evac(T* p, HeapRegion* from) {
1930     assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
1931            "Reference should not be NULL here as such are never pushed to the task queue.");
1932     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1933 
1934     // Although we never intentionally push references outside of the collection
1935     // set, due to (benign) races in the claim mechanism during RSet scanning more
1936     // than one thread might claim the same card. So the same card may be
1937     // processed multiple times. So redo this check.
1938     if (_g1h->in_cset_fast_test(obj)) {
1939       oop forwardee;
1940       if (obj->is_forwarded()) {
1941         forwardee = obj->forwardee();
1942       } else {
1943         forwardee = copy_to_survivor_space(obj);
1944       }
1945       assert(forwardee != NULL, "forwardee should not be NULL");
     // Store the new location of the object back into the field.
1946       oopDesc::encode_store_heap_oop(p, forwardee);
1947     }
1948 
1949     assert(obj != NULL, "Must be");
     // Record a remembered-set update for the (possibly rewritten) field.
1950     update_rs(from, p, queue_num());
1951   }
1952 public:
1953 
     // Evacuates obj and returns its new location; used above as the
     // forwardee for not-yet-forwarded collection-set objects
     // (definition outside this header).
1954   oop copy_to_survivor_space(oop const obj);
1955 
1956   template <class T> inline void deal_with_reference(T* ref_to_scan);
1957 
1958   inline void deal_with_reference(StarTask ref);
1959 
1960 public:
     // Drains this worker's task queue (definition outside this header).
1961   void trim_queue();
1962 };
1963 
1964 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc_implementation/g1/concurrentMark.hpp"
  29 #include "gc_implementation/g1/evacuationInfo.hpp"
  30 #include "gc_implementation/g1/g1AllocRegion.hpp"
  31 #include "gc_implementation/g1/g1BiasedArray.hpp"
  32 #include "gc_implementation/g1/g1HRPrinter.hpp"
  33 #include "gc_implementation/g1/g1MonitoringSupport.hpp"

  34 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
  35 #include "gc_implementation/g1/g1YCTypes.hpp"
  36 #include "gc_implementation/g1/heapRegionSeq.hpp"
  37 #include "gc_implementation/g1/heapRegionSet.hpp"
  38 #include "gc_implementation/shared/hSpaceCounters.hpp"
  39 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  40 #include "memory/barrierSet.hpp"
  41 #include "memory/memRegion.hpp"
  42 #include "memory/sharedHeap.hpp"
  43 #include "utilities/stack.hpp"
  44 
  45 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  46 // It uses the "Garbage First" heap organization and algorithm, which
  47 // may combine concurrent marking with parallel, incremental compaction of
  48 // heap subsets that will yield large amounts of garbage.
  49 
  50 // Forward declarations
  51 class HeapRegion;
  52 class HRRSCleanupTask;
  53 class GenerationSpec;


1691 public:
1692   G1ParGCAllocBuffer(size_t gclab_word_size);
// Destruction is only legal after the buffer has been retired; an
// unretired buffer would mean its remaining space was never accounted
// for in the PLAB statistics (see 8028710 in the changeset notes above).
1693   virtual ~G1ParGCAllocBuffer() {
1694     guarantee(_retired, "Allocation buffer has not been retired");
1695   }
1696 
// Installs a new backing buffer via the base class and marks this
// allocation buffer as live (not retired) again.
1697   virtual void set_buf(HeapWord* buf) {
1698     ParGCAllocBuffer::set_buf(buf);
1699     _retired = false;
1700   }
1701 
// Retires the buffer at most once: calls after the first are no-ops,
// so an explicit retire followed by destruction is safe.
1702   virtual void retire(bool end_of_gc, bool retain) {
1703     if (_retired) {
1704       return;
1705     }
1706     ParGCAllocBuffer::retire(end_of_gc, retain);
1707     _retired = true;
1708   }
1709 };
1710 




























































































































































































































































1711 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP