 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
#include "utilities/stack.hpp"
// A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
public:
  G1ParGCAllocBuffer(size_t gclab_word_size);
  virtual ~G1ParGCAllocBuffer() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

class G1ParScanThreadState : public StackObj {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueue* _refs;
  DirtyCardQueue _dcq;
  G1SATBCardTableModRefBS* _ct_bs;
  G1RemSet* _g1_rem;

  G1ParGCAllocBuffer _surviving_alloc_buffer;
  G1ParGCAllocBuffer _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
  ageTable _age_table;

  G1ParScanClosure _scanner;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  OopsInHeapRegionClosure* _evac_failure_cl;

  int _hash_seed;
  uint _queue_num;

  size_t _term_attempts;

  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. _surviving_young_words_base is the pointer returned
  // by the C-heap allocation.
  size_t* _surviving_young_words_base;
  // This points into the array, as we use the first few entries for padding.
  size_t* _surviving_young_words;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

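  // A worked example of the padding arithmetic above, as a sketch: assuming
  // the common 64-byte cache line (DEFAULT_CACHE_LINE_SIZE == 64) and an
  // 8-byte size_t,
  //   PADDING_ELEM_NUM = 64 / 8 = 8
  // so offsetting _surviving_young_words by PADDING_ELEM_NUM entries from
  // _surviving_young_words_base (as the "first few entries for padding"
  // comment above suggests) leaves a full cache line in front of the live
  // counters, keeping the per-worker arrays from false sharing.
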
  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }

  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue() { return _dcq; }
  G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }

  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points into the same region or
    // into the to-space, we don't need to include it in the RSet updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }
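
  // Sketch of the deferred path above under the usual HotSpot card layout
  // (512-byte cards, i.e. a card shift of 9; the numbers are illustrative):
  // for a field p whose new value escapes both the "from" region and the
  // to-space,
  //   size_t card_index = ctbs()->index_for(p);  // roughly (addr - base) >> 9
  // and only the first thread to mark that card deferred enqueues its byte
  // address; later threads see mark_card_deferred() return false and skip
  // the enqueue, so a given card is not enqueued repeatedly.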

public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);

  ~G1ParScanThreadState() {
    retire_alloc_buffers();
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
  }

  RefToScanQueue* refs() { return _refs; }
  ageTable* age_table() { return &_age_table; }

  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
    return _alloc_buffers[purpose];
  }

  size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
  size_t undo_waste() const { return _undo_waste; }

#ifdef ASSERT
  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    refs()->push(ref);
  }

  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);

  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = NULL;
    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
      alloc_buf->retire(false /* end_of_gc */, false /* retain */);

      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
      if (buf == NULL) return NULL; // Let caller handle allocation failure.
      // Otherwise, install the new buffer and allocate from it.
      alloc_buf->set_word_size(gclab_word_size);
      alloc_buf->set_buf(buf);

      obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "buffer was definitely big enough...");
    } else {
      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
    }
    return obj;
  }
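
  // Worked example of the refill test above, as a sketch assuming the JDK
  // defaults ParallelGCBufferWastePct == 10 and desired_plab_sz() == 4096
  // words (both are tunable, so the numbers are illustrative):
  //   word_sz * 100 < 4096 * 10  holds for  word_sz <= 409,
  // i.e. only an object smaller than ~10% of a fresh PLAB retires the
  // current buffer (counting its remainder as waste) and refills. Larger
  // objects are allocated directly, so one big object cannot force us to
  // throw away a mostly-full buffer.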

  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
    if (obj != NULL) return obj;
    return allocate_slow(purpose, word_sz);
  }

  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
    if (alloc_buffer(purpose)->contains(obj)) {
      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }
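
  // Usage sketch for the allocate()/undo_allocation() pair (a hypothetical
  // caller; "pss", the purpose and word_sz are illustrative): a copying
  // worker speculatively allocates space, then backs out if another thread
  // wins the race to install the forwarding pointer.
  //   HeapWord* obj_addr = pss->allocate(GCAllocForSurvived, word_sz);
  //   if (obj_addr == NULL) {
  //     // PLAB refill and direct allocation both failed: evacuation failure.
  //   } else if (lost_forwarding_race) {  // hypothetical condition
  //     pss->undo_allocation(GCAllocForSurvived, obj_addr, word_sz);
  //   }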

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }
  OopsInHeapRegionClosure* evac_failure_closure() {
    return _evac_failure_cl;
  }

  int* hash_seed() { return &_hash_seed; }
  uint queue_num() { return _queue_num; }

  size_t term_attempts() const { return _term_attempts; }
  void note_term_attempt() { _term_attempts++; }

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }

  void start_term_time() {
    note_term_attempt();
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }

  static void
  print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void
  print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;

  size_t* surviving_young_words() {
    // We add on to hide entry 0 which accumulates surviving words for
    // age -1 regions (i.e. non-young ones)
    return _surviving_young_words;
  }

private:
  void retire_alloc_buffers() {
    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
      size_t waste = _alloc_buffers[ap]->words_remaining();
      add_to_alloc_buffer_waste(waste);
      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
                                                 true /* end_of_gc */,
                                                 false /* retain */);
    }
  }

#define G1_PARTIAL_ARRAY_MASK 0x2

  inline bool has_partial_array_mask(oop* ref) const {
    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  }

  // We never encode partial array oops as narrowOop*, so return false immediately.
  // This allows the compiler to create optimized code when popping references from
  // the work queue.
  inline bool has_partial_array_mask(narrowOop* ref) const {
    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK,
           "Partial array oop reference encoded as narrowOop*");
    return false;
  }

  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
  // We always encode partial arrays as regular oops, to allow the
  // specialization of has_partial_array_mask() for narrowOops above.
  // This means that unintentional use of this method with narrowOops is caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }
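
  // Worked example of the tagging scheme (addresses purely illustrative):
  //   oop  obj    = (oop)0x00007f0012345678;      // HeapWord-aligned
  //   oop* masked = set_partial_array_mask(obj);  // 0x00007f001234567a
  //   has_partial_array_mask(masked)              // true (bit 0x2 set)
  //   clear_partial_array_mask(masked)            // back to obj
  // The trick is sound because objects are at least HeapWord-aligned, so the
  // low bits of a real oop are always zero and bit 0x2 is free for tagging.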

  inline void do_oop_partial_array(oop* p);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
           "Reference should not be NULL here as such are never pushed to the task queue.");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);

    // Although we never intentionally push references outside of the collection
    // set, due to (benign) races in the claim mechanism during RSet scanning more
    // than one thread might claim the same card, so the same card may be
    // processed multiple times. Hence we have to redo the in-cset check here.
    if (_g1h->in_cset_fast_test(obj)) {
      oop forwardee;
      if (obj->is_forwarded()) {
        forwardee = obj->forwardee();
      } else {
        forwardee = copy_to_survivor_space(obj);
      }
      assert(forwardee != NULL, "forwardee should not be NULL");
      oopDesc::encode_store_heap_oop(p, forwardee);
    }

    assert(obj != NULL, "Must be");
    update_rs(from, p, queue_num());
  }

public:
  oop copy_to_survivor_space(oop const obj);

  template <class T> inline void deal_with_reference(T* ref_to_scan);

  inline void deal_with_reference(StarTask ref);

  void trim_queue();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP