576
577 // Allocation attempt that should be called during safepoints (e.g.,
578 // at the end of a successful GC). expect_null_mutator_alloc_region
579 // specifies whether the mutator alloc region is expected to be NULL
580 // or not.
581 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
582 bool expect_null_mutator_alloc_region);
583
584 // It dirties the cards that cover the block so that the post
585 // write barrier never queues anything when updating objects on this
586 // block. It is assumed (and in fact we assert) that the block
587 // belongs to a young region.
588 inline void dirty_young_block(HeapWord* start, size_t word_size);
589
590 // Allocates blocks during garbage collection. Ensures an
591 // allocation region, either by picking one or expanding the
592 // heap, and then allocates a block of the given size. The block
593 // may not be humongous - it must fit into a single heap region.
594 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
595
596 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
597 HeapRegion* alloc_region,
598 bool par,
599 size_t word_size);
600
601 // Ensure that no further allocations can happen in "r", bearing in mind
602 // that parallel threads might be attempting allocations.
603 void par_allocate_remaining_space(HeapRegion* r);
604
605 // Allocation attempt during GC for a survivor object / PLAB.
606 inline HeapWord* survivor_attempt_allocation(size_t word_size);
607
608 // Allocation attempt during GC for an old object / PLAB.
609 inline HeapWord* old_attempt_allocation(size_t word_size);
610
611 // These methods are the "callbacks" from the G1AllocRegion class.
612
613 // For mutator alloc regions.
614 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
615 void retire_mutator_alloc_region(HeapRegion* alloc_region,
616 size_t allocated_bytes);
617
618 // For GC alloc regions.
619 HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
620 GCAllocPurpose ap);
1716 };
1717
1718 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1719 private:
1720 bool _retired;
1721
1722 public:
1723 G1ParGCAllocBuffer(size_t gclab_word_size);
1724
1725 void set_buf(HeapWord* buf) {
1726 ParGCAllocBuffer::set_buf(buf);
1727 _retired = false;
1728 }
1729
1730 void retire(bool end_of_gc, bool retain) {
1731 if (_retired)
1732 return;
1733 ParGCAllocBuffer::retire(end_of_gc, retain);
1734 _retired = true;
1735 }
1736 };
1737
1738 class G1ParScanThreadState : public StackObj {
1739 protected:
1740 G1CollectedHeap* _g1h;
1741 RefToScanQueue* _refs;
1742 DirtyCardQueue _dcq;
1743 CardTableModRefBS* _ct_bs;
1744 G1RemSet* _g1_rem;
1745
1746 G1ParGCAllocBuffer _surviving_alloc_buffer;
1747 G1ParGCAllocBuffer _tenured_alloc_buffer;
1748 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1749 ageTable _age_table;
1750
1751 size_t _alloc_buffer_waste;
1752 size_t _undo_waste;
1753
1754 OopsInHeapRegionClosure* _evac_failure_cl;
1755 G1ParScanHeapEvacClosure* _evac_cl;
1756 G1ParScanPartialArrayClosure* _partial_scan_cl;
1757
1758 int _hash_seed;
1759 uint _queue_num;
1760
1761 size_t _term_attempts;
1762
1763 double _start;
1764 double _start_strong_roots;
1765 double _strong_roots_time;
1766 double _start_term;
1767 double _term_time;
1768
1792 // is the to-space, we don't need to include it in the Rset updates.
1793 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1794 size_t card_index = ctbs()->index_for(p);
1795 // If the card hasn't been added to the buffer, do it.
1796 if (ctbs()->mark_card_deferred(card_index)) {
1797 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1798 }
1799 }
1800 }
1801
1802 public:
1803 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1804
1805 ~G1ParScanThreadState() {
1806 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1807 }
1808
1809 RefToScanQueue* refs() { return _refs; }
1810 ageTable* age_table() { return &_age_table; }
1811
1812 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1813 return _alloc_buffers[purpose];
1814 }
1815
1816 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1817 size_t undo_waste() const { return _undo_waste; }
1818
1819 #ifdef ASSERT
1820 bool verify_ref(narrowOop* ref) const;
1821 bool verify_ref(oop* ref) const;
1822 bool verify_task(StarTask ref) const;
1823 #endif // ASSERT
1824
1825 template <class T> void push_on_queue(T* ref) {
1826 assert(verify_ref(ref), "sanity");
1827 refs()->push(ref);
1828 }
1829
1830 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1831 if (G1DeferredRSUpdate) {
1832 deferred_rs_update(from, p, tid);
1833 } else {
1834 immediate_rs_update(from, p, tid);
1835 }
1836 }
1837
1838 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1839 HeapWord* obj = NULL;
1840 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1841 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1842 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1843 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1844 alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1845
1846 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1847 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1848 // Otherwise.
1849 alloc_buf->set_word_size(gclab_word_size);
1850 alloc_buf->set_buf(buf);
1851
1852 obj = alloc_buf->allocate(word_sz);
1853 assert(obj != NULL, "buffer was definitely big enough...");
1854 } else {
1855 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1856 }
1857 return obj;
1858 }
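// Note the ordering in the slow path above: the remaining words are counted
// as waste and the buffer is retired before the replacement chunk is
// requested, so if par_allocate_during_gc fails the old buffer is already
// gone and cannot serve later small requests.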
1859
1860 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1861 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1862 if (obj != NULL) return obj;
1863 return allocate_slow(purpose, word_sz);
1864 }
1865
1866 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1867 if (alloc_buffer(purpose)->contains(obj)) {
1868 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1869 "should contain whole object");
1870 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1942 _partial_scan_cl->do_oop_nv(ref_to_scan);
1943 } else {
1944 // Note: we can use "raw" versions of "region_containing" because
1945 // "obj_to_scan" is definitely in the heap, and is not in a
1946 // humongous region.
1947 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1948 _evac_cl->set_region(r);
1949 _evac_cl->do_oop_nv(ref_to_scan);
1950 }
1951 }
1952
1953 void deal_with_reference(StarTask ref) {
1954 assert(verify_task(ref), "sanity");
1955 if (ref.is_narrow()) {
1956 deal_with_reference((narrowOop*)ref);
1957 } else {
1958 deal_with_reference((oop*)ref);
1959 }
1960 }
1961
1962 public:
1963 void trim_queue();
1964 };
1965
1966 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
576
577 // Allocation attempt that should be called during safepoints (e.g.,
578 // at the end of a successful GC). expect_null_mutator_alloc_region
579 // specifies whether the mutator alloc region is expected to be NULL
580 // or not.
581 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
582 bool expect_null_mutator_alloc_region);
583
584 // It dirties the cards that cover the block so that the post
585 // write barrier never queues anything when updating objects on this
586 // block. It is assumed (and in fact we assert) that the block
587 // belongs to a young region.
588 inline void dirty_young_block(HeapWord* start, size_t word_size);
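// A rough sketch of why pre-dirtying helps (inferred from the comment
// above, not a quote of the barrier code): the post write barrier only
// enqueues cards that are still clean, along the lines of
//
//   if (*card != dirty_card) { *card = dirty_card; enqueue(card); }
//
// so dirtying every card of a new young block up front means mutator
// stores into the block never reach the refinement queues.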
589
590 // Allocates blocks during garbage collection. Ensures an
591 // allocation region, either by picking one or expanding the
592 // heap, and then allocates a block of the given size. The block
593 // may not be humongous - it must fit into a single heap region.
594 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
595
596 // Ensure that no further allocations can happen in "r", bearing in mind
597 // that parallel threads might be attempting allocations.
598 void par_allocate_remaining_space(HeapRegion* r);
599
600 // Allocation attempt during GC for a survivor object / PLAB.
601 inline HeapWord* survivor_attempt_allocation(size_t word_size);
602
603 // Allocation attempt during GC for an old object / PLAB.
604 inline HeapWord* old_attempt_allocation(size_t word_size);
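// Both helpers are thin per-purpose wrappers: survivor allocations come
// from the survivor GC alloc region, old allocations from the old one,
// mirroring the GCAllocPurpose-indexed buffers used further down.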
605
606 // These methods are the "callbacks" from the G1AllocRegion class.
607
608 // For mutator alloc regions.
609 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
610 void retire_mutator_alloc_region(HeapRegion* alloc_region,
611 size_t allocated_bytes);
612
613 // For GC alloc regions.
614 HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
615 GCAllocPurpose ap);
1711 };
1712
1713 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1714 private:
1715 bool _retired;
1716
1717 public:
1718 G1ParGCAllocBuffer(size_t gclab_word_size);
1719
1720 void set_buf(HeapWord* buf) {
1721 ParGCAllocBuffer::set_buf(buf);
1722 _retired = false;
1723 }
1724
1725 void retire(bool end_of_gc, bool retain) {
1726 if (_retired)
1727 return;
1728 ParGCAllocBuffer::retire(end_of_gc, retain);
1729 _retired = true;
1730 }
1731
1732 bool is_retired() const {
1733 return _retired;
1734 }
1735 };
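// The _retired flag makes retire() idempotent: a buffer replaced mid-GC and
// then swept up again at end of GC runs the base-class retirement only
// once, and set_buf() re-arms the flag when a fresh chunk is installed.
// Minimal lifecycle sketch (illustrative only):
//
//   buf->set_buf(chunk);                           // _retired = false
//   buf->retire(false /* end_of_gc */, false);     // retires once
//   buf->retire(true /* end_of_gc */, false);      // no-op, already retired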
1736
1737 class G1ParGCAllocBufferContainer {
1738 protected:
1739 static int const _priority_max = 2;
1740 G1ParGCAllocBuffer* _priority_buffer[_priority_max];
1741
1742 public:
1743 G1ParGCAllocBufferContainer(size_t gclab_word_size) {
1744 for (int pr = 0; pr < _priority_max; ++pr) {
1745 _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
1746 }
1747 }
1748
1749 ~G1ParGCAllocBufferContainer() {
1750 for (int pr = 0; pr < _priority_max; ++pr) {
1751 assert(_priority_buffer[pr]->is_retired(), "all alloc buffers should be retired at this point.");
1752 delete _priority_buffer[pr];
1753 }
1754 }
1755
1756 HeapWord* allocate(size_t word_sz) {
1757 HeapWord* obj = NULL;
1758 for (int pr = 0; pr < _priority_max; ++pr) {
1759 obj = _priority_buffer[pr]->allocate(word_sz);
1760 if (obj != NULL) return obj;
1761 }
1762 return obj;
1763 }
1764
1765 bool contains(void* addr) {
1766 for (int pr = 0; pr < _priority_max; ++pr) {
1767 if (_priority_buffer[pr]->contains(addr)) return true;
1768 }
1769 return false;
1770 }
1771
1772 void undo_allocation(HeapWord* obj, size_t word_sz) {
1773 bool finish_undo = false;
1774 for (int pr = 0; pr < _priority_max; ++pr) {
1775 if (_priority_buffer[pr]->contains(obj)) {
1776 _priority_buffer[pr]->undo_allocation(obj, word_sz);
1777 finish_undo = true;
1778 }
1779 }
1780 if (!finish_undo) ShouldNotReachHere();
1781 }
1782
1783 size_t words_remaining() {
1784 size_t result = 0;
1785 for (int pr = 0; pr < _priority_max; ++pr) {
1786 result += _priority_buffer[pr]->words_remaining();
1787 }
1788 return result;
1789 }
1790
1791 size_t words_remaining_in_retired_buffer() {
1792 G1ParGCAllocBuffer* retired = _priority_buffer[0];
1793 return retired->words_remaining();
1794 }
1795
1796 void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
1797 for (int pr = 0; pr < _priority_max; ++pr) {
1798 _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
1799 }
1800 }
1801
1802 void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
1803 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
1804 retired_and_set->retire(end_of_gc, retain);
1805 retired_and_set->set_buf(buf);
1806 retired_and_set->set_word_size(word_sz);
1807 adjust_priority_order();
1808 }
1809
1810 private:
1811 void adjust_priority_order() {
1812 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
1813
1814 int last = _priority_max - 1;
1815 for (int pr = 0; pr < last; ++pr) {
1816 _priority_buffer[pr] = _priority_buffer[pr + 1];
1817 }
1818 _priority_buffer[last] = retired_and_set;
1819 }
1820 };
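// How the two-buffer priority scheme fits together (a summary sketch of the
// code above, with _priority_max == 2): allocate() tries buffers in
// priority order; _priority_buffer[0] is always the next buffer to retire;
// update() retires it, installs the freshly allocated chunk, and rotates it
// to the back, so older, partially used buffers are drained first while the
// freshly refilled buffer becomes the fallback.
//
//   G1ParGCAllocBufferContainer c(gclab_word_size);
//   // ... evacuation fills the buffers ...
//   c.update(false /* end_of_gc */, false /* retain */, chunk, word_sz);
//   // the former second-priority buffer is now tried first by allocate().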
1821
1822 class G1ParScanThreadState : public StackObj {
1823 protected:
1824 G1CollectedHeap* _g1h;
1825 RefToScanQueue* _refs;
1826 DirtyCardQueue _dcq;
1827 CardTableModRefBS* _ct_bs;
1828 G1RemSet* _g1_rem;
1829
1830 G1ParGCAllocBufferContainer _surviving_alloc_buffer;
1831 G1ParGCAllocBufferContainer _tenured_alloc_buffer;
1832 G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
1833 ageTable _age_table;
1834
1835 size_t _alloc_buffer_waste;
1836 size_t _undo_waste;
1837
1838 OopsInHeapRegionClosure* _evac_failure_cl;
1839 G1ParScanHeapEvacClosure* _evac_cl;
1840 G1ParScanPartialArrayClosure* _partial_scan_cl;
1841
1842 int _hash_seed;
1843 uint _queue_num;
1844
1845 size_t _term_attempts;
1846
1847 double _start;
1848 double _start_strong_roots;
1849 double _strong_roots_time;
1850 double _start_term;
1851 double _term_time;
1852
1876 // is the to-space, we don't need to include it in the Rset updates.
1877 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1878 size_t card_index = ctbs()->index_for(p);
1879 // If the card hasn't been added to the buffer, do it.
1880 if (ctbs()->mark_card_deferred(card_index)) {
1881 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1882 }
1883 }
1884 }
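// Each card is enqueued at most once per buffer: mark_card_deferred is
// taken here to return true only when it newly marks the card (an
// inference from this call site, not from the card table's own docs).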
1885
1886 public:
1887 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1888
1889 ~G1ParScanThreadState() {
1890 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1891 }
1892
1893 RefToScanQueue* refs() { return _refs; }
1894 ageTable* age_table() { return &_age_table; }
1895
1896 G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
1897 return _alloc_buffers[purpose];
1898 }
1899
1900 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1901 size_t undo_waste() const { return _undo_waste; }
1902
1903 #ifdef ASSERT
1904 bool verify_ref(narrowOop* ref) const;
1905 bool verify_ref(oop* ref) const;
1906 bool verify_task(StarTask ref) const;
1907 #endif // ASSERT
1908
1909 template <class T> void push_on_queue(T* ref) {
1910 assert(verify_ref(ref), "sanity");
1911 refs()->push(ref);
1912 }
1913
1914 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1915 if (G1DeferredRSUpdate) {
1916 deferred_rs_update(from, p, tid);
1917 } else {
1918 immediate_rs_update(from, p, tid);
1919 }
1920 }
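// The two branches trade when remembered-set work happens: the deferred
// path (see the fragment above) enqueues the covering card on this
// thread's DirtyCardQueue for later refinement, while the immediate path
// updates the remembered set through _g1_rem on the spot. (Summary based
// on the members and fragment visible in this file; the elided helper
// bodies are assumed.)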
1921
1922 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1923 HeapWord* obj = NULL;
1924 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1925 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1926 G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
1927
1928 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1929 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1930
1931 add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
1932 alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
1933
1934 obj = alloc_buf->allocate(word_sz);
1935 assert(obj != NULL, "buffer was definitely big enough...");
1936 } else {
1937 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1938 }
1939 return obj;
1940 }
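// A worked example of the waste check (hedged: 10 is only the common
// default of ParallelGCBufferWastePct, not a value set here): with a
// desired PLAB of 4096 words, word_sz * 100 < 4096 * 10 holds for requests
// under ~410 words, so small objects refill the PLAB while anything larger
// is allocated directly outside it.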
1941
1942 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1943 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1944 if (obj != NULL) return obj;
1945 return allocate_slow(purpose, word_sz);
1946 }
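// Typical call shape (illustrative; pss stands for a G1ParScanThreadState*
// and GCAllocForSurvived for the survivor purpose enumerator):
//
//   HeapWord* obj = pss->allocate(GCAllocForSurvived, word_sz);
//   if (obj == NULL) {
//     // PLAB refill and direct allocation both failed; the caller must
//     // take the evacuation-failure path.
//   }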
1947
1948 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1949 if (alloc_buffer(purpose)->contains(obj)) {
1950 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1951 "should contain whole object");
1952 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
2024 _partial_scan_cl->do_oop_nv(ref_to_scan);
2025 } else {
2026 // Note: we can use "raw" versions of "region_containing" because
2027 // "obj_to_scan" is definitely in the heap, and is not in a
2028 // humongous region.
2029 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2030 _evac_cl->set_region(r);
2031 _evac_cl->do_oop_nv(ref_to_scan);
2032 }
2033 }
2034
2035 void deal_with_reference(StarTask ref) {
2036 assert(verify_task(ref), "sanity");
2037 if (ref.is_narrow()) {
2038 deal_with_reference((narrowOop*)ref);
2039 } else {
2040 deal_with_reference((oop*)ref);
2041 }
2042 }
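// StarTask packs either a narrowOop* or an oop* into a single tagged
// entry; is_narrow() recovers the flavor so compressed and uncompressed
// references can share one work queue, as the dispatch above shows.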
2043
2044 void trim_queue();
2045 };
2046
2047 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP