src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 6323 : 8027553: Change the in_cset_fast_test functionality to use the G1BiasedArray abstraction
Summary: Use a G1BiasedArray instance for the in_cset_fast_test table instead of a manually managed array.
Reviewed-by: brutisso, mgerdin
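
For illustration, a minimal sketch of the biased-array idea (hypothetical names, not the actual G1BiasedArray API): the table holds one flag per heap region and is indexed by an address right-shifted by the region-size log. Shown here with an explicit base subtraction; the real abstraction pre-biases the stored base pointer so the subtraction disappears from the fast path.

#include <cassert>
#include <cstdint>
#include <vector>

class BiasedInCSetTable {
  std::vector<bool> _table;      // one entry per heap region
  uintptr_t         _heap_base;  // start address of the reserved heap
  unsigned          _shift;      // log2 of the region size in bytes

public:
  BiasedInCSetTable(uintptr_t heap_base, size_t num_regions, unsigned region_shift)
    : _table(num_regions, false), _heap_base(heap_base), _shift(region_shift) {}

  // Map a heap address to its region index.
  size_t index_for(uintptr_t addr) const {
    assert(addr >= _heap_base && "address below heap base");
    size_t idx = (addr - _heap_base) >> _shift;
    assert(idx < _table.size() && "address outside reserved heap");
    return idx;
  }

  void set_in_cset(uintptr_t region_start, bool v) { _table[index_for(region_start)] = v; }

  // The fast test: one subtraction, one shift, one load.
  bool in_cset_fast_test(uintptr_t obj) const { return _table[index_for(obj)]; }
};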
rev 6326 : 8028710: G1 does not retire allocation buffers after reference processing work
Summary: With -XX:+ParallelRefProcEnabled, G1 does not retire allocation buffers after reference processing work. This leads to incorrect PLAB size calculations, because the amount of wasted space is not updated correctly.
Reviewed-by: brutisso
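
To see why unretired buffers matter, here is a stripped-down model (hypothetical names, not the HotSpot classes): the unused tail of a buffer is only folded into the waste statistics when the buffer is retired, and the sizing policy derives the next PLAB size from allocated versus wasted words, so a skipped retirement understates waste.

#include <cstddef>
#include <cstdio>

struct PlabStats {
  size_t allocated = 0;  // total words handed out from retired buffers
  size_t wasted    = 0;  // total unused words in retired buffers
};

class AllocBuffer {
  size_t _size;
  size_t _used;
  bool   _retired;

public:
  explicit AllocBuffer(size_t words) : _size(words), _used(0), _retired(true) {}

  void set_buf() { _used = 0; _retired = false; }

  bool allocate(size_t words) {
    if (_used + words > _size) return false;
    _used += words;
    return true;
  }

  // Retiring flushes both the used words and the unused tail into the shared
  // statistics. If a buffer used during reference processing is never retired,
  // 'wasted' stays too low and the next PLAB size is computed from bad data.
  void retire(PlabStats* stats) {
    if (_retired) return;
    stats->allocated += _used;
    stats->wasted    += _size - _used;
    _retired = true;
  }
};

int main() {
  PlabStats stats;
  AllocBuffer buf(1024);
  buf.set_buf();
  buf.allocate(100);   // reference processing copies a few objects
  buf.retire(&stats);  // the fix: always retire, so the 924-word tail is counted
  printf("allocated=%zu wasted=%zu\n", stats.allocated, stats.wasted);
  return 0;
}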

--- old
1674   void print_tracing_info() const;
1675 
1676   // The following two methods are helpful for debugging RSet issues.
1677   void print_cset_rsets() PRODUCT_RETURN;
1678   void print_all_rsets() PRODUCT_RETURN;
1679 
1680 public:
1681   size_t pending_card_num();
1682   size_t cards_scanned();
1683 
1684 protected:
1685   size_t _max_heap_capacity;
1686 };
1687 
1688 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1689 private:
1690   bool        _retired;
1691 
1692 public:
1693   G1ParGCAllocBuffer(size_t gclab_word_size);



1694 
1695   void set_buf(HeapWord* buf) {
1696     ParGCAllocBuffer::set_buf(buf);
1697     _retired = false;
1698   }
1699 
1700   void retire(bool end_of_gc, bool retain) {
1701     if (_retired)
1702       return;

1703     ParGCAllocBuffer::retire(end_of_gc, retain);
1704     _retired = true;
1705   }
1706 };
1707 
1708 class G1ParScanThreadState : public StackObj {
1709 protected:
1710   G1CollectedHeap* _g1h;
1711   RefToScanQueue*  _refs;
1712   DirtyCardQueue   _dcq;
1713   G1SATBCardTableModRefBS* _ct_bs;
1714   G1RemSet* _g1_rem;
1715 
1716   G1ParGCAllocBuffer  _surviving_alloc_buffer;
1717   G1ParGCAllocBuffer  _tenured_alloc_buffer;
1718   G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1719   ageTable            _age_table;
1720 
1721   G1ParScanClosure    _scanner;
1722 


1752   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
1753 
1754   template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
1755 
1756   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1757     // If the new value of the field points into the same region, or the
1758     // source region is a survivor (to-space), the RSet need not be updated.
1759     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1760       size_t card_index = ctbs()->index_for(p);
1761       // If the card hasn't been added to the buffer, do it.
1762       if (ctbs()->mark_card_deferred(card_index)) {
1763         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1764       }
1765     }
1766   }
1767 
1768 public:
1769   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1770 
1771   ~G1ParScanThreadState() {

1772     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1773   }
1774 
1775   RefToScanQueue*   refs()            { return _refs;             }
1776   ageTable*         age_table()       { return &_age_table;       }
1777 
1778   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1779     return _alloc_buffers[purpose];
1780   }
1781 
1782   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1783   size_t undo_waste() const                      { return _undo_waste; }
1784 
1785 #ifdef ASSERT
1786   bool verify_ref(narrowOop* ref) const;
1787   bool verify_ref(oop* ref) const;
1788   bool verify_task(StarTask ref) const;
1789 #endif // ASSERT
1790 
1791   template <class T> void push_on_queue(T* ref) {


1862   void end_term_time() {
1863     _term_time += (os::elapsedTime() - _start_term);
1864   }
1865   double term_time() const { return _term_time; }
1866 
1867   double elapsed_time() const {
1868     return os::elapsedTime() - _start;
1869   }
1870 
1871   static void
1872     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1873   void
1874     print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1875 
1876   size_t* surviving_young_words() {
1877     // We add one to hide entry 0, which accumulates surviving words for
1878     // age -1 regions (i.e. non-young ones).
1879     return _surviving_young_words;
1880   }
1881 

1882   void retire_alloc_buffers() {
1883     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1884       size_t waste = _alloc_buffers[ap]->words_remaining();
1885       add_to_alloc_buffer_waste(waste);
1886       _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1887                                                  true /* end_of_gc */,
1888                                                  false /* retain */);
1889     }
1890   }
1891 private:
1892   #define G1_PARTIAL_ARRAY_MASK 0x2
1893 
1894   inline bool has_partial_array_mask(oop* ref) const {
1895     return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
1896   }
1897 
1898   // We never encode partial array oops as narrowOop*, so return false immediately.
1899   // This allows the compiler to create optimized code when popping references from
1900   // the work queue.
1901   inline bool has_partial_array_mask(narrowOop* ref) const {
1902     assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
1903     return false;
1904   }
1905 
1906   // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
1907   // We always encode partial arrays as regular oops, to allow the
1908   // specialization of has_partial_array_mask() for narrowOops above.
1909   // This means that unintentional use of this method with narrowOops is caught
1910   // by the compiler.
1911   inline oop* set_partial_array_mask(oop obj) const {
1912     assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");

+++ new

1674   void print_tracing_info() const;
1675 
1676   // The following two methods are helpful for debugging RSet issues.
1677   void print_cset_rsets() PRODUCT_RETURN;
1678   void print_all_rsets() PRODUCT_RETURN;
1679 
1680 public:
1681   size_t pending_card_num();
1682   size_t cards_scanned();
1683 
1684 protected:
1685   size_t _max_heap_capacity;
1686 };
1687 
1688 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1689 private:
1690   bool        _retired;
1691 
1692 public:
1693   G1ParGCAllocBuffer(size_t gclab_word_size);
1694   virtual ~G1ParGCAllocBuffer() {
1695     guarantee(_retired, "Allocation buffer has not been retired");
1696   }
1697 
1698   virtual void set_buf(HeapWord* buf) {
1699     ParGCAllocBuffer::set_buf(buf);
1700     _retired = false;
1701   }
1702 
1703   virtual void retire(bool end_of_gc, bool retain) {
1704     if (_retired) {
1705       return;
1706     }
1707     ParGCAllocBuffer::retire(end_of_gc, retain);
1708     _retired = true;
1709   }
1710 };
1711 
1712 class G1ParScanThreadState : public StackObj {
1713 protected:
1714   G1CollectedHeap* _g1h;
1715   RefToScanQueue*  _refs;
1716   DirtyCardQueue   _dcq;
1717   G1SATBCardTableModRefBS* _ct_bs;
1718   G1RemSet* _g1_rem;
1719 
1720   G1ParGCAllocBuffer  _surviving_alloc_buffer;
1721   G1ParGCAllocBuffer  _tenured_alloc_buffer;
1722   G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1723   ageTable            _age_table;
1724 
1725   G1ParScanClosure    _scanner;
1726 


1756   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
1757 
1758   template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
1759 
1760   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1761     // If the new value of the field points into the same region, or the
1762     // source region is a survivor (to-space), the RSet need not be updated.
1763     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1764       size_t card_index = ctbs()->index_for(p);
1765       // If the card hasn't been added to the buffer, do it.
1766       if (ctbs()->mark_card_deferred(card_index)) {
1767         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1768       }
1769     }
1770   }
1771 
1772 public:
1773   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1774 
1775   ~G1ParScanThreadState() {
1776     retire_alloc_buffers();
1777     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1778   }
1779 
1780   RefToScanQueue*   refs()            { return _refs;             }
1781   ageTable*         age_table()       { return &_age_table;       }
1782 
1783   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1784     return _alloc_buffers[purpose];
1785   }
1786 
1787   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1788   size_t undo_waste() const                      { return _undo_waste; }
1789 
1790 #ifdef ASSERT
1791   bool verify_ref(narrowOop* ref) const;
1792   bool verify_ref(oop* ref) const;
1793   bool verify_task(StarTask ref) const;
1794 #endif // ASSERT
1795 
1796   template <class T> void push_on_queue(T* ref) {


1867   void end_term_time() {
1868     _term_time += (os::elapsedTime() - _start_term);
1869   }
1870   double term_time() const { return _term_time; }
1871 
1872   double elapsed_time() const {
1873     return os::elapsedTime() - _start;
1874   }
1875 
1876   static void
1877     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1878   void
1879     print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1880 
1881   size_t* surviving_young_words() {
1882     // We add one to hide entry 0, which accumulates surviving words for
1883     // age -1 regions (i.e. non-young ones).
1884     return _surviving_young_words;
1885   }
1886 
1887 private:
1888   void retire_alloc_buffers() {
1889     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1890       size_t waste = _alloc_buffers[ap]->words_remaining();
1891       add_to_alloc_buffer_waste(waste);
1892       _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1893                                                  true /* end_of_gc */,
1894                                                  false /* retain */);
1895     }
1896   }
1897 
1898 #define G1_PARTIAL_ARRAY_MASK 0x2
1899 
1900   inline bool has_partial_array_mask(oop* ref) const {
1901     return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
1902   }
1903 
1904   // We never encode partial array oops as narrowOop*, so return false immediately.
1905   // This allows the compiler to create optimized code when popping references from
1906   // the work queue.
1907   inline bool has_partial_array_mask(narrowOop* ref) const {
1908     assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
1909     return false;
1910   }
1911 
1912   // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
1913   // We always encode partial arrays as regular oops, to allow the
1914   // specialization of has_partial_array_mask() for narrowOops above.
1915   // This means that unintentional use of this method with narrowOops is caught
1916   // by the compiler.
1917   inline oop* set_partial_array_mask(oop obj) const {
1918     assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
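
For context, a generic sketch of the pointer-tagging scheme these comments describe (hypothetical free functions outside HotSpot; the real code works on oop* task pointers and also clears the mask when popping a task): a low bit that is always zero in aligned pointers is borrowed to flag partial-array work items, and stripped again before the pointer is dereferenced.

#include <cassert>
#include <cstdint>

const uintptr_t PARTIAL_ARRAY_MASK = 0x2;

inline void* set_partial_array_mask(void* p) {
  // Alignment guarantees the low bits are free; otherwise tagging loses data.
  assert(((uintptr_t)p & PARTIAL_ARRAY_MASK) == 0 && "Information loss!");
  return (void*)((uintptr_t)p | PARTIAL_ARRAY_MASK);
}

inline bool has_partial_array_mask(void* p) {
  return ((uintptr_t)p & PARTIAL_ARRAY_MASK) == PARTIAL_ARRAY_MASK;
}

inline void* clear_partial_array_mask(void* p) {
  return (void*)((uintptr_t)p & ~PARTIAL_ARRAY_MASK);
}

int main() {
  alignas(8) int obj = 42;
  void* task = set_partial_array_mask(&obj);
  assert(has_partial_array_mask(task));
  assert(clear_partial_array_mask(task) == &obj);
  return 0;
}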