src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

Print this page




 573 
 574   // Allocation attempt that should be called during safepoints (e.g.,
 575   // at the end of a successful GC). expect_null_mutator_alloc_region
 576   // specifies whether the mutator alloc region is expected to be NULL
 577   // or not.
 578   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 579                                        bool expect_null_mutator_alloc_region);
 580 
 581   // It dirties the cards that cover the block so that the post
 582   // write barrier never queues anything when updating objects on this
 583   // block. It is assumed (and in fact we assert) that the block
 584   // belongs to a young region.
 585   inline void dirty_young_block(HeapWord* start, size_t word_size);
 586 
 587   // Allocate blocks during garbage collection. Will ensure an
 588   // allocation region, either by picking one or expanding the
 589   // heap, and then allocate a block of the given size. The block
 590   // may not be a humongous - it must fit into a single heap region.
 591   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 592 
 593   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
 594                                     HeapRegion*    alloc_region,
 595                                     bool           par,
 596                                     size_t         word_size);
 597 
 598   // Ensure that no further allocations can happen in "r", bearing in mind
 599   // that parallel threads might be attempting allocations.
 600   void par_allocate_remaining_space(HeapRegion* r);
 601 
 602   // Allocation attempt during GC for a survivor object / PLAB.
 603   inline HeapWord* survivor_attempt_allocation(size_t word_size);
 604 
 605   // Allocation attempt during GC for an old object / PLAB.
 606   inline HeapWord* old_attempt_allocation(size_t word_size);
 607 
 608   // These methods are the "callbacks" from the G1AllocRegion class.
 609 
 610   // For mutator alloc regions.
 611   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 612   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 613                                    size_t allocated_bytes);
 614 
 615   // For GC alloc regions.
 616   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
 617                                   GCAllocPurpose ap);


1729 private:
1730   bool        _retired;
1731 
1732 public:
1733   G1ParGCAllocBuffer(size_t gclab_word_size);
1734 
1735   void set_buf(HeapWord* buf) {
1736     ParGCAllocBuffer::set_buf(buf);
1737     _retired = false;
1738   }
1739 
// Retire this buffer at most once: delegate to the base-class retire
// and then record that it has happened, so a second call (e.g. from a
// later cleanup pass) is a no-op.
1740   void retire(bool end_of_gc, bool retain) {
1741     if (_retired)
1742       return;
1743     ParGCAllocBuffer::retire(end_of_gc, retain);
1744     _retired = true;
1745   }
1746 };
1747 
1748 class G1ParScanThreadState : public StackObj {











1749 protected:
1750   G1CollectedHeap* _g1h;
1751   RefToScanQueue*  _refs;
1752   DirtyCardQueue   _dcq;
1753   CardTableModRefBS* _ct_bs;
1754   G1RemSet* _g1_rem;
1755 
1756   G1ParGCAllocBuffer  _surviving_alloc_buffer;
1757   G1ParGCAllocBuffer  _tenured_alloc_buffer;
1758   G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1759   ageTable            _age_table;
1760 
1761   size_t           _alloc_buffer_waste;
1762   size_t           _undo_waste;
1763 
1764   OopsInHeapRegionClosure*      _evac_failure_cl;
1765   G1ParScanHeapEvacClosure*     _evac_cl;
1766   G1ParScanPartialArrayClosure* _partial_scan_cl;
1767 
1768   int _hash_seed;
1769   uint _queue_num;
1770 
1771   size_t _term_attempts;
1772 
1773   double _start;
1774   double _start_strong_roots;
1775   double _strong_roots_time;
1776   double _start_term;
1777   double _term_time;
1778 


1797     }
1798   }
1799 
// Deferred remembered-set update for the reference slot p in region
// "from": instead of updating the RSet immediately, mark the covering
// card and enqueue it on this thread's dirty card queue for later
// processing.
1800   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1801     // If the new value of the field points to the same region or
1802     // is the to-space, we don't need to include it in the Rset updates.
1803     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1804       size_t card_index = ctbs()->index_for(p);
1805       // If the card hasn't been added to the buffer, do it.
       // mark_card_deferred() returns true only for the first marking,
       // so each card is enqueued at most once.
1806       if (ctbs()->mark_card_deferred(card_index)) {
1807         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1808       }
1809     }
1810   }
1811 
1812 public:
1813   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1814 
1815   ~G1ParScanThreadState() {
1816     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);




1817   }
1818 
1819   RefToScanQueue*   refs()            { return _refs;             }
1820   ageTable*         age_table()       { return &_age_table;       }
1821 
1822   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1823     return _alloc_buffers[purpose];
1824   }
1825 
1826   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1827   size_t undo_waste() const                      { return _undo_waste; }
1828 
1829 #ifdef ASSERT
1830   bool verify_ref(narrowOop* ref) const;
1831   bool verify_ref(oop* ref) const;
1832   bool verify_task(StarTask ref) const;
1833 #endif // ASSERT
1834 
1835   template <class T> void push_on_queue(T* ref) {
1836     assert(verify_ref(ref), "sanity");
1837     refs()->push(ref);
1838   }
1839 
1840   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1841     if (G1DeferredRSUpdate) {
1842       deferred_rs_update(from, p, tid);
1843     } else {
1844       immediate_rs_update(from, p, tid);
1845     }
1846   }
1847 
// Slow-path GC allocation of word_sz words for the given purpose,
// used when the fast-path PLAB allocation failed. Either retires the
// current PLAB and refills it, or — for requests that are large
// relative to the PLAB — allocates directly from the heap.
1848   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1849     HeapWord* obj = NULL;
1850     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
       // Refill the PLAB only when the request is small relative to the
       // PLAB size; otherwise the space wasted by retiring the current
       // buffer would exceed ParallelGCBufferWastePct.
1851     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1852       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
       // Whatever is left in the old buffer is wasted; account for it
       // before retiring.
1853       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1854       alloc_buf->retire(false /* end_of_gc */, false /* retain */);


1855 
1856       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1857       if (buf == NULL) return NULL; // Let caller handle allocation failure.
1858       // Otherwise.
1859       alloc_buf->set_word_size(gclab_word_size);
1860       alloc_buf->set_buf(buf);

1862       obj = alloc_buf->allocate(word_sz);
1863       assert(obj != NULL, "buffer was definitely big enough...");




1864     } else {
       // Large request: bypass the PLAB and allocate directly.
1865       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1866     }
1867     return obj;
1868   }
1869 
1870   HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1871     HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);


1872     if (obj != NULL) return obj;


1873     return allocate_slow(purpose, word_sz);
1874   }
1875 
// Undo a GC allocation of word_sz words at obj. If obj came from the
// purpose's PLAB, give the space back to the buffer; otherwise the
// space cannot be reclaimed, so fill it with a dummy object and
// account it as undo waste.
1876   void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1877     if (alloc_buffer(purpose)->contains(obj)) {
       // The object must lie entirely within the buffer.
1878       assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),

1879              "should contain whole object");
1880       alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1881     } else {



1882       CollectedHeap::fill_with_object(obj, word_sz);
1883       add_to_undo_waste(word_sz);
1884     }
1885   }
1886 
1887   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1888     _evac_failure_cl = evac_failure_cl;
1889   }
1890   OopsInHeapRegionClosure* evac_failure_closure() {
1891     return _evac_failure_cl;
1892   }
1893 
1894   void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
1895     _evac_cl = evac_cl;
1896   }
1897 
1898   void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
1899     _partial_scan_cl = partial_scan_cl;
1900   }
1901 
1902   int* hash_seed() { return &_hash_seed; }
1903   uint queue_num() { return _queue_num; }
1904 
1905   size_t term_attempts() const  { return _term_attempts; }


1921     _term_time += (os::elapsedTime() - _start_term);
1922   }
1923   double term_time() const { return _term_time; }
1924 
1925   double elapsed_time() const {
1926     return os::elapsedTime() - _start;
1927   }
1928 
1929   static void
1930     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1931   void
1932     print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1933 
1934   size_t* surviving_young_words() {
1935     // We add on to hide entry 0 which accumulates surviving words for
1936     // age -1 regions (i.e. non-young ones)
1937     return _surviving_young_words;
1938   }
1939 
// End-of-GC cleanup: for every allocation purpose, account the unused
// remainder of its PLAB as waste, flush the buffer's statistics into
// the per-purpose PLAB stats, and retire it for good.
1940   void retire_alloc_buffers() {
1941     for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1942       size_t waste = _alloc_buffers[ap]->words_remaining();


1943       add_to_alloc_buffer_waste(waste);
1944       _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1945                                                  true /* end_of_gc */,
1946                                                  false /* retain */);
1947     }
1948   }
1949 
1950   template <class T> void deal_with_reference(T* ref_to_scan) {
1951     if (has_partial_array_mask(ref_to_scan)) {
1952       _partial_scan_cl->do_oop_nv(ref_to_scan);
1953     } else {
1954       // Note: we can use "raw" versions of "region_containing" because
1955       // "obj_to_scan" is definitely in the heap, and is not in a
1956       // humongous region.
1957       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1958       _evac_cl->set_region(r);
1959       _evac_cl->do_oop_nv(ref_to_scan);
1960     }
1961   }
1962 
1963   void deal_with_reference(StarTask ref) {
1964     assert(verify_task(ref), "sanity");
1965     if (ref.is_narrow()) {
1966       deal_with_reference((narrowOop*)ref);
1967     } else {
1968       deal_with_reference((oop*)ref);
1969     }
1970   }
1971 
1972 public:
1973   void trim_queue();
1974 };
1975 
1976 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP


 573 
 574   // Allocation attempt that should be called during safepoints (e.g.,
 575   // at the end of a successful GC). expect_null_mutator_alloc_region
 576   // specifies whether the mutator alloc region is expected to be NULL
 577   // or not.
 578   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 579                                        bool expect_null_mutator_alloc_region);
 580 
 581   // It dirties the cards that cover the block so that the post
 582   // write barrier never queues anything when updating objects on this
 583   // block. It is assumed (and in fact we assert) that the block
 584   // belongs to a young region.
 585   inline void dirty_young_block(HeapWord* start, size_t word_size);
 586 
 587   // Allocate blocks during garbage collection. Will ensure an
 588   // allocation region, either by picking one or expanding the
 589   // heap, and then allocate a block of the given size. The block
 590   // may not be a humongous - it must fit into a single heap region.
 591   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 592 





 593   // Ensure that no further allocations can happen in "r", bearing in mind
 594   // that parallel threads might be attempting allocations.
 595   void par_allocate_remaining_space(HeapRegion* r);
 596 
 597   // Allocation attempt during GC for a survivor object / PLAB.
 598   inline HeapWord* survivor_attempt_allocation(size_t word_size);
 599 
 600   // Allocation attempt during GC for an old object / PLAB.
 601   inline HeapWord* old_attempt_allocation(size_t word_size);
 602 
 603   // These methods are the "callbacks" from the G1AllocRegion class.
 604 
 605   // For mutator alloc regions.
 606   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 607   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 608                                    size_t allocated_bytes);
 609 
 610   // For GC alloc regions.
 611   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
 612                                   GCAllocPurpose ap);


1724 private:
1725   bool        _retired;
1726 
1727 public:
1728   G1ParGCAllocBuffer(size_t gclab_word_size);
1729 
1730   void set_buf(HeapWord* buf) {
1731     ParGCAllocBuffer::set_buf(buf);
1732     _retired = false;
1733   }
1734 
1735   void retire(bool end_of_gc, bool retain) {
1736     if (_retired)
1737       return;
1738     ParGCAllocBuffer::retire(end_of_gc, retain);
1739     _retired = true;
1740   }
1741 };
1742 
1743 class G1ParScanThreadState : public StackObj {
1744 private:
1745   enum GCAllocPriority {
1746     GCAllocPriority1,
1747     GCAllocPriority2,
1748     GCAllocPriorityCount
1749   };
1750 
1751   int buf_idx(int purpose, int priority) {
1752     return purpose*GCAllocPriorityCount + priority;
1753   }
1754 
1755 protected:
1756   G1CollectedHeap* _g1h;
1757   RefToScanQueue*  _refs;
1758   DirtyCardQueue   _dcq;
1759   CardTableModRefBS* _ct_bs;
1760   G1RemSet* _g1_rem;
1761 
1762   G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount*GCAllocPriorityCount];


1763   ageTable            _age_table;
1764 
1765   size_t           _alloc_buffer_waste;
1766   size_t           _undo_waste;
1767 
1768   OopsInHeapRegionClosure*      _evac_failure_cl;
1769   G1ParScanHeapEvacClosure*     _evac_cl;
1770   G1ParScanPartialArrayClosure* _partial_scan_cl;
1771 
1772   int _hash_seed;
1773   uint _queue_num;
1774 
1775   size_t _term_attempts;
1776 
1777   double _start;
1778   double _start_strong_roots;
1779   double _strong_roots_time;
1780   double _start_term;
1781   double _term_time;
1782 


1801     }
1802   }
1803 
1804   template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1805     // If the new value of the field points to the same region or
1806     // is the to-space, we don't need to include it in the Rset updates.
1807     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1808       size_t card_index = ctbs()->index_for(p);
1809       // If the card hasn't been added to the buffer, do it.
1810       if (ctbs()->mark_card_deferred(card_index)) {
1811         dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1812       }
1813     }
1814   }
1815 
1816 public:
1817   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1818 
// Release per-thread resources: the surviving-young-words side array
// and the heap-allocated PLABs (one per purpose/priority pair).
1819   ~G1ParScanThreadState() {
1820     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1821     for (int ap = 0; ap < GCAllocPurposeCount; ++ap)
1822       for (int pr = 0; pr < GCAllocPriorityCount; ++pr) {
1823         delete alloc_buffer(GCAllocPurpose(ap), GCAllocPriority(pr));
1824       }
1825   }
1826 
1827   RefToScanQueue*   refs()            { return _refs;             }
1828   ageTable*         age_table()       { return &_age_table;       }
1829 
1830   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, GCAllocPriority priority) {
1831     return _alloc_buffers[buf_idx(purpose, priority)];
1832   }
1833 
1834   size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
1835   size_t undo_waste() const                      { return _undo_waste; }
1836 
1837 #ifdef ASSERT
1838   bool verify_ref(narrowOop* ref) const;
1839   bool verify_ref(oop* ref) const;
1840   bool verify_task(StarTask ref) const;
1841 #endif // ASSERT
1842 
1843   template <class T> void push_on_queue(T* ref) {
1844     assert(verify_ref(ref), "sanity");
1845     refs()->push(ref);
1846   }
1847 
1848   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1849     if (G1DeferredRSUpdate) {
1850       deferred_rs_update(from, p, tid);
1851     } else {
1852       immediate_rs_update(from, p, tid);
1853     }
1854   }
1855 
// Slow-path GC allocation of word_sz words for the given purpose in
// the two-priority PLAB scheme. Retires the priority-1 buffer,
// refills it, and then swaps it behind the old priority-2 buffer so
// that the less-filled buffer keeps first priority on the fast path.
1856   HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1857     HeapWord* obj = NULL;
1858     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
       // Refill only when the request is small relative to the PLAB;
       // otherwise retiring the buffers would waste too much space.
1859     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1860       G1ParGCAllocBuffer* alloc_buf1 = alloc_buffer(purpose, GCAllocPriority1);
1861       G1ParGCAllocBuffer* alloc_buf2 = alloc_buffer(purpose, GCAllocPriority2);
1862 
       // Only the priority-1 buffer is retired; its remainder is waste.
1863       add_to_alloc_buffer_waste(alloc_buf1->words_remaining());
1864       alloc_buf1->retire(false /* end_of_gc */, false /* retain */);
1865 
1866       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1867       if (buf == NULL) return NULL; // Let caller handle allocation failure.
1868       // Otherwise.
1869       alloc_buf1->set_word_size(gclab_word_size);
1870       alloc_buf1->set_buf(buf);
1871 
1872       obj = alloc_buf1->allocate(word_sz);
1873       assert(obj != NULL, "buffer was definitely big enough...");
1874 
1875       // Swap buffers in order to preserve the priority order
1876       _alloc_buffers[buf_idx(purpose, GCAllocPriority1)] = alloc_buf2;
1877       _alloc_buffers[buf_idx(purpose, GCAllocPriority2)] = alloc_buf1;
1878     } else {
       // Large request: bypass the PLABs and allocate directly.
1879       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1880     }
1881     return obj;
1882   }
1883 
1884   HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1885     HeapWord* obj;
1886     for (int pr = 0; pr < GCAllocPriorityCount; ++pr) {
1887       obj = alloc_buffer(purpose, (GCAllocPriority)pr)->allocate(word_sz);
1888       if (obj != NULL) return obj;
1889     }
1890     // Otherwise.
1891     return allocate_slow(purpose, word_sz);
1892   }
1893 
1894   void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1895     for (int pr = 0; pr < GCAllocPriorityCount; ++pr) {
1896       if (alloc_buffer(purpose, (GCAllocPriority)pr)->contains(obj)) {
1897         assert(alloc_buffer(purpose, pr)->contains(obj + word_sz - 1),
1898                "should contain whole object");
1899         alloc_buffer(purpose, (GCAllocPriority)pr)->undo_allocation(obj, word_sz);
1900         return;
1901       }
1902     }
1903 
1904     CollectedHeap::fill_with_object(obj, word_sz);
1905     add_to_undo_waste(word_sz);
1906   }

1907 
1908   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1909     _evac_failure_cl = evac_failure_cl;
1910   }
1911   OopsInHeapRegionClosure* evac_failure_closure() {
1912     return _evac_failure_cl;
1913   }
1914 
1915   void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
1916     _evac_cl = evac_cl;
1917   }
1918 
1919   void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
1920     _partial_scan_cl = partial_scan_cl;
1921   }
1922 
1923   int* hash_seed() { return &_hash_seed; }
1924   uint queue_num() { return _queue_num; }
1925 
1926   size_t term_attempts() const  { return _term_attempts; }


1942     _term_time += (os::elapsedTime() - _start_term);
1943   }
1944   double term_time() const { return _term_time; }
1945 
1946   double elapsed_time() const {
1947     return os::elapsedTime() - _start;
1948   }
1949 
1950   static void
1951     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1952   void
1953     print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1954 
1955   size_t* surviving_young_words() {
1956     // We add on to hide entry 0 which accumulates surviving words for
1957     // age -1 regions (i.e. non-young ones)
1958     return _surviving_young_words;
1959   }
1960 
1961   void retire_alloc_buffers() {
1962     for (int ap = 0; ap < GCAllocPurposeCount; ++ap)
1963       for (int pr = 0; pr < GCAllocPriorityCount; ++pr) {
1964         G1ParGCAllocBuffer* buffer = alloc_buffer((GCAllocPurpose)ap, (GCAllocPriority)pr);
1965         size_t waste = buffer->words_remaining();
1966         add_to_alloc_buffer_waste(waste);
1967         buffer->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1968                                        true /* end_of_gc */,
1969                                        false /* retain */);
1970       }
1971   }
1972 
1973   template <class T> void deal_with_reference(T* ref_to_scan) {
1974     if (has_partial_array_mask(ref_to_scan)) {
1975       _partial_scan_cl->do_oop_nv(ref_to_scan);
1976     } else {
1977       // Note: we can use "raw" versions of "region_containing" because
1978       // "obj_to_scan" is definitely in the heap, and is not in a
1979       // humongous region.
1980       HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1981       _evac_cl->set_region(r);
1982       _evac_cl->do_oop_nv(ref_to_scan);
1983     }
1984   }
1985 
1986   void deal_with_reference(StarTask ref) {
1987     assert(verify_task(ref), "sanity");
1988     if (ref.is_narrow()) {
1989       deal_with_reference((narrowOop*)ref);
1990     } else {
1991       deal_with_reference((oop*)ref);
1992     }
1993   }
1994 

1995   void trim_queue();
1996 };
1997 
1998 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP