src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

  53 class ObjectClosure;
  54 class SpaceClosure;
  55 class CompactibleSpaceClosure;
  56 class Space;
  57 class G1CollectorPolicy;
  58 class GenRemSet;
  59 class G1RemSet;
  60 class HeapRegionRemSetIterator;
  61 class ConcurrentMark;
  62 class ConcurrentMarkThread;
  63 class ConcurrentG1Refine;
  64 class GenerationCounters;
  65 
  66 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
  67 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
  68 
  69 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
  70 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  71 
  72 enum GCAllocPurpose {
  73   GCAllocForTenured,
  74   GCAllocForSurvived,
  75   GCAllocPurposeCount
  76 };
  77 
  78 class YoungList : public CHeapObj<mtGC> {
  79 private:
  80   G1CollectedHeap* _g1h;
  81 
  82   HeapRegion* _head;
  83 
  84   HeapRegion* _survivor_head;
  85   HeapRegion* _survivor_tail;
  86 
  87   HeapRegion* _curr;
  88 
  89   uint        _length;
  90   uint        _survivor_length;
  91 
  92   size_t      _last_sampled_rs_lengths;
  93   size_t      _sampled_rs_lengths;
  94 
  95   void         empty_list(HeapRegion* list);


 576 
 577   // Allocation attempt that should be called during safepoints (e.g.,
 578   // at the end of a successful GC). expect_null_mutator_alloc_region
 579   // specifies whether the mutator alloc region is expected to be NULL
 580   // or not.
 581   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 582                                        bool expect_null_mutator_alloc_region);
 583 
  584   // It dirties the cards that cover the block so that the post
 585   // write barrier never queues anything when updating objects on this
 586   // block. It is assumed (and in fact we assert) that the block
 587   // belongs to a young region.
 588   inline void dirty_young_block(HeapWord* start, size_t word_size);
 589 
 590   // Allocate blocks during garbage collection. Will ensure an
 591   // allocation region, either by picking one or expanding the
 592   // heap, and then allocate a block of the given size. The block
  593   // may not be humongous - it must fit into a single heap region.
 594   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 595 
 596   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
 597                                     HeapRegion*    alloc_region,
 598                                     bool           par,
 599                                     size_t         word_size);
 600 
 601   // Ensure that no further allocations can happen in "r", bearing in mind
 602   // that parallel threads might be attempting allocations.
 603   void par_allocate_remaining_space(HeapRegion* r);
 604 
 605   // Allocation attempt during GC for a survivor object / PLAB.
 606   inline HeapWord* survivor_attempt_allocation(size_t word_size);
 607 
 608   // Allocation attempt during GC for an old object / PLAB.
 609   inline HeapWord* old_attempt_allocation(size_t word_size);
 610 
 611   // These methods are the "callbacks" from the G1AllocRegion class.
 612 
 613   // For mutator alloc regions.
 614   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 615   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 616                                    size_t allocated_bytes);
 617 
 618   // For GC alloc regions.
 619   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
 620                                   GCAllocPurpose ap);


1716 };
1717 
1718 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1719 private:
1720   bool        _retired;
1721 
1722 public:
1723   G1ParGCAllocBuffer(size_t gclab_word_size);
1724 
1725   void set_buf(HeapWord* buf) {
1726     ParGCAllocBuffer::set_buf(buf);
1727     _retired = false;
1728   }
1729 
1730   void retire(bool end_of_gc, bool retain) {
1731     if (_retired)
1732       return;
1733     ParGCAllocBuffer::retire(end_of_gc, retain);
1734     _retired = true;
1735   }
1736 };
1737 
1738 class G1ParScanThreadState : public StackObj {
1739 protected:
1740   G1CollectedHeap* _g1h;
1741   RefToScanQueue*  _refs;
1742   DirtyCardQueue   _dcq;
1743   CardTableModRefBS* _ct_bs;
1744   G1RemSet* _g1_rem;
1745 
1746   G1ParGCAllocBuffer  _surviving_alloc_buffer;
1747   G1ParGCAllocBuffer  _tenured_alloc_buffer;
1748   G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1749   ageTable            _age_table;
1750 
1751   size_t           _alloc_buffer_waste;
1752   size_t           _undo_waste;
1753 
1754   OopsInHeapRegionClosure*      _evac_failure_cl;
1755   G1ParScanHeapEvacClosure*     _evac_cl;
1756   G1ParScanPartialArrayClosure* _partial_scan_cl;
1757 
1758   int _hash_seed;
1759   uint _queue_num;
1760 
1761   size_t _term_attempts;
1762 
1763   double _start;
1764   double _start_strong_roots;
1765   double _strong_roots_time;
1766   double _start_term;
1767   double _term_time;
1768 


1792     // is the to-space, we don't need to include it in the Rset updates.
1793     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1794       size_t card_index = ctbs()->index_for(p);
1795       // If the card hasn't been added to the buffer, do it.
1796       if (ctbs()->mark_card_deferred(card_index)) {
1797         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1798       }
1799     }
1800   }
1801 
1802 public:
1803   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1804 
1805   ~G1ParScanThreadState() {
1806     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1807   }
1808 
1809   RefToScanQueue*   refs()            { return _refs;             }
1810   ageTable*         age_table()       { return &_age_table;       }
1811 
1812   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1813     return _alloc_buffers[purpose];
1814   }
1815 
1816   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1817   size_t undo_waste() const                      { return _undo_waste; }
1818 
1819 #ifdef ASSERT
1820   bool verify_ref(narrowOop* ref) const;
1821   bool verify_ref(oop* ref) const;
1822   bool verify_task(StarTask ref) const;
1823 #endif // ASSERT
1824 
1825   template <class T> void push_on_queue(T* ref) {
1826     assert(verify_ref(ref), "sanity");
1827     refs()->push(ref);
1828   }
1829 
1830   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1831     if (G1DeferredRSUpdate) {
1832       deferred_rs_update(from, p, tid);
1833     } else {
1834       immediate_rs_update(from, p, tid);
1835     }
1836   }
1837 
1838   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1839     HeapWord* obj = NULL;
1840     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1841     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1842       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1843       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1844       alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1845 
1846       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1847       if (buf == NULL) return NULL; // Let caller handle allocation failure.
1848       // Otherwise.
1849       alloc_buf->set_word_size(gclab_word_size);
1850       alloc_buf->set_buf(buf);
1851 
1852       obj = alloc_buf->allocate(word_sz);
1853       assert(obj != NULL, "buffer was definitely big enough...");
1854     } else {
1855       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1856     }
1857     return obj;
1858   }
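
(Note on the pre-patch flow above: the current PLAB is retired at line 1844 before the replacement buffer is requested at line 1846, so a NULL result from par_allocate_during_gc leaves the thread holding an already-retired buffer. The patched allocate_slow further down requests the replacement first and retires the old buffer only once the refill is known to have succeeded.)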
1859 
1860   HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1861     HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1862     if (obj != NULL) return obj;
1863     return allocate_slow(purpose, word_sz);
1864   }
1865 
1866   void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1867     if (alloc_buffer(purpose)->contains(obj)) {
1868       assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1869              "should contain whole object");
1870       alloc_buffer(purpose)->undo_allocation(obj, word_sz);


1942       _partial_scan_cl->do_oop_nv(ref_to_scan);
1943     } else {
1944       // Note: we can use "raw" versions of "region_containing" because
1945       // "obj_to_scan" is definitely in the heap, and is not in a
1946       // humongous region.
1947       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1948       _evac_cl->set_region(r);
1949       _evac_cl->do_oop_nv(ref_to_scan);
1950     }
1951   }
1952 
1953   void deal_with_reference(StarTask ref) {
1954     assert(verify_task(ref), "sanity");
1955     if (ref.is_narrow()) {
1956       deal_with_reference((narrowOop*)ref);
1957     } else {
1958       deal_with_reference((oop*)ref);
1959     }
1960   }
1961 
1962 public:
1963   void trim_queue();
1964 };
1965 
1966 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
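
(The listing above shows the header before the change; the patched version of the same file follows. The substantive differences: explicit values in the GCAllocPurpose enum, a new is_retired() accessor on G1ParGCAllocBuffer, the G1ParGCAllocBufferContainer class with its two-slot priority scheme, and reordered refill logic in allocate_slow.)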


  53 class ObjectClosure;
  54 class SpaceClosure;
  55 class CompactibleSpaceClosure;
  56 class Space;
  57 class G1CollectorPolicy;
  58 class GenRemSet;
  59 class G1RemSet;
  60 class HeapRegionRemSetIterator;
  61 class ConcurrentMark;
  62 class ConcurrentMarkThread;
  63 class ConcurrentG1Refine;
  64 class GenerationCounters;
  65 
  66 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
  67 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
  68 
  69 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
  70 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  71 
  72 enum GCAllocPurpose {
  73   GCAllocPurposeStart = 0,
  74   GCAllocForTenured = GCAllocPurposeStart,
  75   GCAllocForSurvived = GCAllocPurposeStart + 1,
  76   GCAllocPurposeCount = GCAllocPurposeStart + 2
  77 };
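
(The enum now spells out its values via the Start/Count idiom, so GCAllocPurposeCount can size arrays such as _alloc_buffers[GCAllocPurposeCount] and bound loops over the purposes; the GCAllocPriority enum introduced below follows the same pattern.)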
  78 
  79 class YoungList : public CHeapObj<mtGC> {
  80 private:
  81   G1CollectedHeap* _g1h;
  82 
  83   HeapRegion* _head;
  84 
  85   HeapRegion* _survivor_head;
  86   HeapRegion* _survivor_tail;
  87 
  88   HeapRegion* _curr;
  89 
  90   uint        _length;
  91   uint        _survivor_length;
  92 
  93   size_t      _last_sampled_rs_lengths;
  94   size_t      _sampled_rs_lengths;
  95 
  96   void         empty_list(HeapRegion* list);


 577 
 578   // Allocation attempt that should be called during safepoints (e.g.,
 579   // at the end of a successful GC). expect_null_mutator_alloc_region
 580   // specifies whether the mutator alloc region is expected to be NULL
 581   // or not.
 582   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 583                                        bool expect_null_mutator_alloc_region);
 584 
 585   // It dirties the cards that cover the block so that the post
 586   // write barrier never queues anything when updating objects on this
 587   // block. It is assumed (and in fact we assert) that the block
 588   // belongs to a young region.
 589   inline void dirty_young_block(HeapWord* start, size_t word_size);
 590 
 591   // Allocate blocks during garbage collection. Will ensure an
 592   // allocation region, either by picking one or expanding the
 593   // heap, and then allocate a block of the given size. The block
 594   // may not be humongous - it must fit into a single heap region.
 595   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 596 
 597   // Ensure that no further allocations can happen in "r", bearing in mind
 598   // that parallel threads might be attempting allocations.
 599   void par_allocate_remaining_space(HeapRegion* r);
 600 
 601   // Allocation attempt during GC for a survivor object / PLAB.
 602   inline HeapWord* survivor_attempt_allocation(size_t word_size);
 603 
 604   // Allocation attempt during GC for an old object / PLAB.
 605   inline HeapWord* old_attempt_allocation(size_t word_size);
 606 
 607   // These methods are the "callbacks" from the G1AllocRegion class.
 608 
 609   // For mutator alloc regions.
 610   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 611   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 612                                    size_t allocated_bytes);
 613 
 614   // For GC alloc regions.
 615   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
 616                                   GCAllocPurpose ap);


1712 };
1713 
1714 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1715 private:
1716   bool        _retired;
1717 
1718 public:
1719   G1ParGCAllocBuffer(size_t gclab_word_size);
1720 
1721   void set_buf(HeapWord* buf) {
1722     ParGCAllocBuffer::set_buf(buf);
1723     _retired = false;
1724   }
1725 
1726   void retire(bool end_of_gc, bool retain) {
1727     if (_retired)
1728       return;
1729     ParGCAllocBuffer::retire(end_of_gc, retain);
1730     _retired = true;
1731   }
1732 
1733   bool is_retired() {
1734     return _retired;
1735   }
1736 };
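
(Since retire() bails out when _retired is already set, a second retire is a harmless no-op, and the new is_retired() accessor lets the container's destructor below assert that every buffer was retired before being deleted.)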
1737 
1738 class G1ParGCAllocBufferContainer {
1739 protected:
1740   enum GCAllocPriority {
1741       GCAllocPriorityStart = 0,
1742       GCAllocPriority1 = GCAllocPriorityStart,
1743       GCAllocPriority2 = GCAllocPriorityStart + 1,
1744       GCAllocPriorityCount = GCAllocPriorityStart + 2
1745   };
1746   G1ParGCAllocBuffer* _priority_buffer[GCAllocPriorityCount];
1747 
1748 public:
1749   G1ParGCAllocBufferContainer(size_t gclab_word_size) {
1750     for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1751       _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
1752     }
1753   }
1754 
1755   ~G1ParGCAllocBufferContainer() {
1756     for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1757       assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
1758       delete _priority_buffer[pr];
1759     }
1760   }
1761 
1762   HeapWord* allocate(size_t word_sz) {
1763     HeapWord* obj = NULL;  // NULL-initialized: also the fall-through result when every buffer is full
1764     for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1765       obj = _priority_buffer[pr]->allocate(word_sz);
1766       if (obj != NULL) return obj;
1767     }
1768     return obj;
1769   }
1770 
1771   bool contains(void* addr) {
1772     for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1773       if (_priority_buffer[pr]->contains(addr)) return true;
1774     }
1775     return false;
1776   }
1777 
1778   void undo_allocation(HeapWord* obj, size_t word_sz) {
1779     bool finish_undo = false;
1780     for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1781       if (_priority_buffer[pr]->contains(obj)) {
1782         _priority_buffer[pr]->undo_allocation(obj, word_sz);
1783         finish_undo = true;
1784       }
1785     }
1786     if (!finish_undo) ShouldNotReachHere();
1787   }
1788 
1789   size_t words_remaining() {
1790     size_t result = 0;
1791     for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1792       result += _priority_buffer[pr]->words_remaining();
1793     }
1794     return result;
1795   }
1796 
1797   size_t words_remaining_in_priority1_buffer() {
1798     G1ParGCAllocBuffer* retired = _priority_buffer[GCAllocPriority1];
1799     return retired->words_remaining();
1800   }
1801 
1802   void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
1803     for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1804       _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
1805     }
1806   }
1807 
1808   void retire_and_set_buf(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
1809     G1ParGCAllocBuffer* retired_and_set =  _priority_buffer[GCAllocPriority1];
1810     retired_and_set->retire(end_of_gc, retain);
1811     retired_and_set->set_buf(buf);
1812     retired_and_set->set_word_size(word_sz);
1813     adjust_priority_order();
1814   }
1815 
1816 private:
1817   void adjust_priority_order() {
1818     G1ParGCAllocBuffer* retired_and_set =  _priority_buffer[GCAllocPriority1];
1819 
1820     int last = GCAllocPriorityCount - 1;
1821     for (int pr = GCAllocPriorityStart; pr < last; ++pr) {
1822       _priority_buffer[pr] = _priority_buffer[pr + 1];
1823     }
1824     _priority_buffer[last] = retired_and_set;
1825   }
1826 };
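
To make the rotation in adjust_priority_order concrete, here is a minimal, self-contained sketch of the same policy with toy types (ToyBuffer and ToyContainer are hypothetical stand-ins, not HotSpot code). Allocation always tries the priority-1 slot first; on a refill, the freshly filled buffer rotates to the back, so the older, partially used buffer keeps absorbing small allocations before the new PLAB is touched:

  #include <cstdio>
  #include <cstddef>

  // Toy bump-pointer buffer standing in for ParGCAllocBuffer.
  struct ToyBuffer {
    size_t top, size;
    ToyBuffer() : top(0), size(0) {}
    void set_buf(size_t words) { top = 0; size = words; }
    bool allocate(size_t words) {        // true if the buffer had room
      if (top + words > size) return false;
      top += words;
      return true;
    }
  };

  // Two-slot priority container mirroring G1ParGCAllocBufferContainer.
  struct ToyContainer {
    ToyBuffer buf[2];                    // buf[0] is priority 1
    ToyContainer(size_t words) { buf[0].set_buf(words); buf[1].set_buf(words); }
    int allocate(size_t words) {         // returns the slot used, or -1
      for (int pr = 0; pr < 2; ++pr) {
        if (buf[pr].allocate(words)) return pr;
      }
      return -1;
    }
    // Refill: priority-1 gets the fresh buffer, then rotates to the back,
    // as retire_and_set_buf + adjust_priority_order do above
    // (retirement of the old contents is elided in this toy).
    void retire_and_set_buf(size_t words) {
      ToyBuffer refilled = buf[0];
      refilled.set_buf(words);
      buf[0] = buf[1];
      buf[1] = refilled;
    }
  };

  int main() {
    ToyContainer c(8);          // two 8-word toy PLABs
    c.allocate(6);              // slot 0 now has 2 words left
    c.allocate(4);              // spills to slot 1, which has 4 words left
    c.retire_and_set_buf(8);    // slot 0 refilled and rotated to the back
    // The older buffer (4 words left) is tried before the fresh one:
    printf("served from slot %d\n", c.allocate(3));   // prints 0
    return 0;
  }

This is also why allocate_slow (below) charges only words_remaining_in_priority1_buffer() to the waste counter: only the single buffer actually being retired gives up its tail, while the other slot keeps its remaining space in play.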
1827 
1828 class G1ParScanThreadState : public StackObj {
1829 protected:
1830   G1CollectedHeap* _g1h;
1831   RefToScanQueue*  _refs;
1832   DirtyCardQueue   _dcq;
1833   CardTableModRefBS* _ct_bs;
1834   G1RemSet* _g1_rem;
1835 
1836   G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
1837   G1ParGCAllocBufferContainer  _tenured_alloc_buffer;
1838   G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
1839   ageTable            _age_table;
1840 
1841   size_t           _alloc_buffer_waste;
1842   size_t           _undo_waste;
1843 
1844   OopsInHeapRegionClosure*      _evac_failure_cl;
1845   G1ParScanHeapEvacClosure*     _evac_cl;
1846   G1ParScanPartialArrayClosure* _partial_scan_cl;
1847 
1848   int _hash_seed;
1849   uint _queue_num;
1850 
1851   size_t _term_attempts;
1852 
1853   double _start;
1854   double _start_strong_roots;
1855   double _strong_roots_time;
1856   double _start_term;
1857   double _term_time;
1858 


1882     // is the to-space, we don't need to include it in the Rset updates.
1883     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1884       size_t card_index = ctbs()->index_for(p);
1885       // If the card hasn't been added to the buffer, do it.
1886       if (ctbs()->mark_card_deferred(card_index)) {
1887         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1888       }
1889     }
1890   }
1891 
1892 public:
1893   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1894 
1895   ~G1ParScanThreadState() {
1896     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1897   }
1898 
1899   RefToScanQueue*   refs()            { return _refs;             }
1900   ageTable*         age_table()       { return &_age_table;       }
1901 
1902   G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
1903     return _alloc_buffers[purpose];
1904   }
1905 
1906   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1907   size_t undo_waste() const                      { return _undo_waste; }
1908 
1909 #ifdef ASSERT
1910   bool verify_ref(narrowOop* ref) const;
1911   bool verify_ref(oop* ref) const;
1912   bool verify_task(StarTask ref) const;
1913 #endif // ASSERT
1914 
1915   template <class T> void push_on_queue(T* ref) {
1916     assert(verify_ref(ref), "sanity");
1917     refs()->push(ref);
1918   }
1919 
1920   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1921     if (G1DeferredRSUpdate) {
1922       deferred_rs_update(from, p, tid);
1923     } else {
1924       immediate_rs_update(from, p, tid);
1925     }
1926   }
1927 
1928   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1929     HeapWord* obj = NULL;
1930     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1931     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1932       G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
1933 
1934       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1935       if (buf == NULL) return NULL; // Let caller handle allocation failure.
1936 
1937       add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_priority1_buffer());
1938       alloc_buf->retire_and_set_buf(false /* end_of_gc */, false /* retain */,
1939                                     buf, gclab_word_size);
1940 
1941       obj = alloc_buf->allocate(word_sz);
1942       assert(obj != NULL, "buffer was definitely big enough...");
1943     } else {
1944       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1945     }
1946     return obj;
1947   }
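
The guard at line 1931 chooses between refilling the PLAB and allocating the object directly: refill only when the object is small relative to a full PLAB. Since allocate_slow is reached only after the buffers failed to fit the object, the discarded tail of the retired buffer is smaller than word_sz, and the test bounds word_sz by ParallelGCBufferWastePct percent of a PLAB, so each refill wastes at most that fraction. As a worked example (assuming HotSpot's default ParallelGCBufferWastePct of 10): with desired_plab_sz returning 4096 words, the refill path is taken for word_sz up to 409 (409 * 100 < 40960), while a 500-word object goes straight to par_allocate_during_gc and leaves the current buffers in place. The test, pulled out as a hypothetical helper:

  #include <cstddef>

  // Hypothetical helper, not in the source: the refill test from line 1931.
  static bool should_refill_plab(size_t word_sz, size_t plab_sz, size_t waste_pct) {
    return word_sz * 100 < plab_sz * waste_pct;   // e.g. 409 * 100 < 4096 * 10
  }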
1948 
1949   HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1950     HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1951     if (obj != NULL) return obj;
1952     return allocate_slow(purpose, word_sz);
1953   }
1954 
1955   void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1956     if (alloc_buffer(purpose)->contains(obj)) {
1957       assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1958              "should contain whole object");
1959       alloc_buffer(purpose)->undo_allocation(obj, word_sz);


2031       _partial_scan_cl->do_oop_nv(ref_to_scan);
2032     } else {
2033       // Note: we can use "raw" versions of "region_containing" because
2034       // "obj_to_scan" is definitely in the heap, and is not in a
2035       // humongous region.
2036       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2037       _evac_cl->set_region(r);
2038       _evac_cl->do_oop_nv(ref_to_scan);
2039     }
2040   }
2041 
2042   void deal_with_reference(StarTask ref) {
2043     assert(verify_task(ref), "sanity");
2044     if (ref.is_narrow()) {
2045       deal_with_reference((narrowOop*)ref);
2046     } else {
2047       deal_with_reference((oop*)ref);
2048     }
2049   }
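
(deal_with_reference(StarTask) recovers the pointer width that the queue erased: a StarTask holds either an oop* or a narrowOop*, and is_narrow() reports which, so one RefToScanQueue can carry both compressed and uncompressed oop locations. A minimal sketch of the idea, assuming the usual low-bit tagging trick; TaggedRef is hypothetical, HotSpot's actual encoding lives in StarTask in taskqueue.hpp:)

  #include <cstdint>

  struct TaggedRef {
    uintptr_t bits;                       // pointer plus a 1-bit width tag
                                          // (assumes >= 2-byte pointer alignment)
    static TaggedRef wide(void** p)       { return { (uintptr_t)p };      }  // tag 0
    static TaggedRef narrow(uint32_t* p)  { return { (uintptr_t)p | 1u }; }  // tag 1
    bool  is_narrow() const               { return (bits & 1u) != 0; }
    void* ptr() const                     { return (void*)(bits & ~(uintptr_t)1); }
  };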
2050 
2051   void trim_queue();
2052 };
2053 
2054 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP