1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
53 class ObjectClosure;
54 class SpaceClosure;
55 class CompactibleSpaceClosure;
56 class Space;
57 class G1CollectorPolicy;
58 class GenRemSet;
59 class G1RemSet;
60 class HeapRegionRemSetIterator;
61 class ConcurrentMark;
62 class ConcurrentMarkThread;
63 class ConcurrentG1Refine;
64 class GenerationCounters;
65
66 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
67 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
68
69 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
70 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
71
// Identifies which kind of PLAB / region a GC-time allocation is for.
enum GCAllocPurpose {
  GCAllocForTenured  = 0,  // promotions into the old generation
  GCAllocForSurvived = 1,  // copies into survivor regions
  GCAllocPurposeCount      // number of purposes; sizes per-purpose arrays
};
77
78 class YoungList : public CHeapObj<mtGC> {
79 private:
80 G1CollectedHeap* _g1h;
81
82 HeapRegion* _head;
83
84 HeapRegion* _survivor_head;
85 HeapRegion* _survivor_tail;
86
87 HeapRegion* _curr;
88
89 uint _length;
90 uint _survivor_length;
91
92 size_t _last_sampled_rs_lengths;
93 size_t _sampled_rs_lengths;
94
95 void empty_list(HeapRegion* list);
577
578 // Allocation attempt that should be called during safepoints (e.g.,
579 // at the end of a successful GC). expect_null_mutator_alloc_region
580 // specifies whether the mutator alloc region is expected to be NULL
581 // or not.
582 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
583 bool expect_null_mutator_alloc_region);
584
// It dirties the cards that cover the block so that the post
586 // write barrier never queues anything when updating objects on this
587 // block. It is assumed (and in fact we assert) that the block
588 // belongs to a young region.
589 inline void dirty_young_block(HeapWord* start, size_t word_size);
590
591 // Allocate blocks during garbage collection. Will ensure an
592 // allocation region, either by picking one or expanding the
593 // heap, and then allocate a block of the given size. The block
594 // may not be a humongous - it must fit into a single heap region.
595 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
596
597 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
598 HeapRegion* alloc_region,
599 bool par,
600 size_t word_size);
601
602 // Ensure that no further allocations can happen in "r", bearing in mind
603 // that parallel threads might be attempting allocations.
604 void par_allocate_remaining_space(HeapRegion* r);
605
606 // Allocation attempt during GC for a survivor object / PLAB.
607 inline HeapWord* survivor_attempt_allocation(size_t word_size);
608
609 // Allocation attempt during GC for an old object / PLAB.
610 inline HeapWord* old_attempt_allocation(size_t word_size);
611
612 // These methods are the "callbacks" from the G1AllocRegion class.
613
614 // For mutator alloc regions.
615 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
616 void retire_mutator_alloc_region(HeapRegion* alloc_region,
617 size_t allocated_bytes);
618
619 // For GC alloc regions.
620 HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
621 GCAllocPurpose ap);
1730 };
1731
1732 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1733 private:
1734 bool _retired;
1735
1736 public:
1737 G1ParGCAllocBuffer(size_t gclab_word_size);
1738
1739 void set_buf(HeapWord* buf) {
1740 ParGCAllocBuffer::set_buf(buf);
1741 _retired = false;
1742 }
1743
1744 void retire(bool end_of_gc, bool retain) {
1745 if (_retired)
1746 return;
1747 ParGCAllocBuffer::retire(end_of_gc, retain);
1748 _retired = true;
1749 }
1750 };
1751
1752 class G1ParScanThreadState : public StackObj {
1753 protected:
1754 G1CollectedHeap* _g1h;
1755 RefToScanQueue* _refs;
1756 DirtyCardQueue _dcq;
1757 CardTableModRefBS* _ct_bs;
1758 G1RemSet* _g1_rem;
1759
1760 G1ParGCAllocBuffer _surviving_alloc_buffer;
1761 G1ParGCAllocBuffer _tenured_alloc_buffer;
1762 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1763 ageTable _age_table;
1764
1765 size_t _alloc_buffer_waste;
1766 size_t _undo_waste;
1767
1768 OopsInHeapRegionClosure* _evac_failure_cl;
1769 G1ParScanHeapEvacClosure* _evac_cl;
1770 G1ParScanPartialArrayClosure* _partial_scan_cl;
1771
1772 int _hash_seed;
1773 uint _queue_num;
1774
1775 size_t _term_attempts;
1776
1777 double _start;
1778 double _start_strong_roots;
1779 double _strong_roots_time;
1780 double _start_term;
1781 double _term_time;
1782
1806 // is the to-space, we don't need to include it in the Rset updates.
1807 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1808 size_t card_index = ctbs()->index_for(p);
1809 // If the card hasn't been added to the buffer, do it.
1810 if (ctbs()->mark_card_deferred(card_index)) {
1811 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1812 }
1813 }
1814 }
1815
1816 public:
1817 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1818
1819 ~G1ParScanThreadState() {
1820 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1821 }
1822
1823 RefToScanQueue* refs() { return _refs; }
1824 ageTable* age_table() { return &_age_table; }
1825
1826 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1827 return _alloc_buffers[purpose];
1828 }
1829
1830 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1831 size_t undo_waste() const { return _undo_waste; }
1832
1833 #ifdef ASSERT
1834 bool verify_ref(narrowOop* ref) const;
1835 bool verify_ref(oop* ref) const;
1836 bool verify_task(StarTask ref) const;
1837 #endif // ASSERT
1838
  // Pushes a reference location onto this thread's scan queue after a
  // (debug-only) sanity verification of the reference.
  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    refs()->push(ref);
  }
1843
  // Records a remembered-set update for the reference at p in region
  // "from": deferred via the dirty card queue when G1DeferredRSUpdate
  // is enabled, otherwise applied immediately.
  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
    if (G1DeferredRSUpdate) {
      deferred_rs_update(from, p, tid);
    } else {
      immediate_rs_update(from, p, tid);
    }
  }
1851
1852 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1853 HeapWord* obj = NULL;
1854 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1855 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1856 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1857 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1858 alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1859
1860 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1861 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1862 // Otherwise.
1863 alloc_buf->set_word_size(gclab_word_size);
1864 alloc_buf->set_buf(buf);
1865
1866 obj = alloc_buf->allocate(word_sz);
1867 assert(obj != NULL, "buffer was definitely big enough...");
1868 } else {
1869 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1870 }
1871 return obj;
1872 }
1873
1874 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1875 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1876 if (obj != NULL) return obj;
1877 return allocate_slow(purpose, word_sz);
1878 }
1879
1880 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1881 if (alloc_buffer(purpose)->contains(obj)) {
1882 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1883 "should contain whole object");
1884 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1956 _partial_scan_cl->do_oop_nv(ref_to_scan);
1957 } else {
1958 // Note: we can use "raw" versions of "region_containing" because
1959 // "obj_to_scan" is definitely in the heap, and is not in a
1960 // humongous region.
1961 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1962 _evac_cl->set_region(r);
1963 _evac_cl->do_oop_nv(ref_to_scan);
1964 }
1965 }
1966
1967 void deal_with_reference(StarTask ref) {
1968 assert(verify_task(ref), "sanity");
1969 if (ref.is_narrow()) {
1970 deal_with_reference((narrowOop*)ref);
1971 } else {
1972 deal_with_reference((oop*)ref);
1973 }
1974 }
1975
1976 public:
1977 void trim_queue();
1978 };
1979
1980 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
|
1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
53 class ObjectClosure;
54 class SpaceClosure;
55 class CompactibleSpaceClosure;
56 class Space;
57 class G1CollectorPolicy;
58 class GenRemSet;
59 class G1RemSet;
60 class HeapRegionRemSetIterator;
61 class ConcurrentMark;
62 class ConcurrentMarkThread;
63 class ConcurrentG1Refine;
64 class GenerationCounters;
65
66 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
67 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
68
69 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
70 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
71
// Identifies which kind of PLAB / region a GC-time allocation is for.
// Written in Start/Count form so per-purpose arrays can be sized and
// iterated uniformly.
enum GCAllocPurpose {
  GCAllocPurposeStart = 0,
  GCAllocForTenured = GCAllocPurposeStart,        // old-gen promotions
  GCAllocForSurvived = GCAllocPurposeStart + 1,   // survivor copies
  GCAllocPurposeCount = GCAllocPurposeStart + 2   // number of purposes
};
78
79 class YoungList : public CHeapObj<mtGC> {
80 private:
81 G1CollectedHeap* _g1h;
82
83 HeapRegion* _head;
84
85 HeapRegion* _survivor_head;
86 HeapRegion* _survivor_tail;
87
88 HeapRegion* _curr;
89
90 uint _length;
91 uint _survivor_length;
92
93 size_t _last_sampled_rs_lengths;
94 size_t _sampled_rs_lengths;
95
96 void empty_list(HeapRegion* list);
578
579 // Allocation attempt that should be called during safepoints (e.g.,
580 // at the end of a successful GC). expect_null_mutator_alloc_region
581 // specifies whether the mutator alloc region is expected to be NULL
582 // or not.
583 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
584 bool expect_null_mutator_alloc_region);
585
// It dirties the cards that cover the block so that the post
587 // write barrier never queues anything when updating objects on this
588 // block. It is assumed (and in fact we assert) that the block
589 // belongs to a young region.
590 inline void dirty_young_block(HeapWord* start, size_t word_size);
591
592 // Allocate blocks during garbage collection. Will ensure an
593 // allocation region, either by picking one or expanding the
594 // heap, and then allocate a block of the given size. The block
595 // may not be a humongous - it must fit into a single heap region.
596 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
597
598 // Ensure that no further allocations can happen in "r", bearing in mind
599 // that parallel threads might be attempting allocations.
600 void par_allocate_remaining_space(HeapRegion* r);
601
602 // Allocation attempt during GC for a survivor object / PLAB.
603 inline HeapWord* survivor_attempt_allocation(size_t word_size);
604
605 // Allocation attempt during GC for an old object / PLAB.
606 inline HeapWord* old_attempt_allocation(size_t word_size);
607
608 // These methods are the "callbacks" from the G1AllocRegion class.
609
610 // For mutator alloc regions.
611 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
612 void retire_mutator_alloc_region(HeapRegion* alloc_region,
613 size_t allocated_bytes);
614
615 // For GC alloc regions.
616 HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
617 GCAllocPurpose ap);
1726 };
1727
// A ParGCAllocBuffer that remembers whether it has already been
// retired, so that a second retire() call is a no-op and callers can
// query the retired state.
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool _retired;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);

  // Installs a new backing buffer and marks the PLAB live again.
  void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  // Retires the buffer at most once; repeated calls have no effect.
  void retire(bool end_of_gc, bool retain) {
    if (_retired)
      return;
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }

  // Returns whether this buffer has been retired since it was last set.
  bool is_retired() {
    return _retired;
  }
};
1751
1752 class G1MultiParGCAllocBuffer {
1753 protected:
1754 enum GCAllocPriority {
1755 GCAllocPriorityStart = 0,
1756 GCAllocPriority1 = GCAllocPriorityStart,
1757 GCAllocPriority2 = GCAllocPriorityStart + 1,
1758 GCAllocPriorityCount = GCAllocPriorityStart + 2
1759 };
1760 G1ParGCAllocBuffer* _priority_buffer[GCAllocPriorityCount];
1761
1762 public:
1763 G1MultiParGCAllocBuffer(size_t gclab_word_size) {
1764 for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1765 _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
1766 }
1767 }
1768
1769 ~G1MultiParGCAllocBuffer() {
1770 for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1771 assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
1772 delete _priority_buffer[pr];
1773 }
1774 }
1775
1776 HeapWord* allocate(size_t word_sz) {
1777 HeapWord* obj;
1778 for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1779 obj = _priority_buffer[pr]->allocate(word_sz);
1780 if (obj != NULL) return obj;
1781 }
1782 return obj;
1783 }
1784
1785 bool contains(void* addr) {
1786 for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1787 if (_priority_buffer[pr]->contains(addr)) return true;
1788 }
1789 return false;
1790 }
1791
1792 void undo_allocation(HeapWord* obj, size_t word_sz) {
1793 bool finish_undo;
1794 for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1795 if (_priority_buffer[pr]->contains(obj)) {
1796 _priority_buffer[pr]->undo_allocation(obj, word_sz);
1797 finish_undo = true;
1798 }
1799 }
1800 if (finish_undo != true) ShouldNotReachHere();
1801 }
1802
1803 size_t words_remaining() {
1804 size_t result=0;
1805 for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1806 result += _priority_buffer[pr]->words_remaining();
1807 }
1808 return result;
1809 }
1810
1811 size_t words_remaining_in_retired() {
1812 G1ParGCAllocBuffer* retired = _priority_buffer[GCAllocPriority1];
1813 return retired->words_remaining();
1814 }
1815
1816 void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
1817 for (int pr = GCAllocPriorityStart; pr < GCAllocPriorityCount; ++pr) {
1818 _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
1819 }
1820 }
1821
1822 void retire_and_set_buf(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
1823 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[GCAllocPriority1];
1824 retired_and_set->retire(end_of_gc, retain);
1825 retired_and_set->set_buf(buf);
1826 retired_and_set->set_word_size(word_sz);
1827 adjust_priority_order();
1828 }
1829
1830 private:
1831 void adjust_priority_order() {
1832 G1ParGCAllocBuffer* retired_and_set = _priority_buffer[GCAllocPriority1];
1833
1834 int last = GCAllocPriorityCount - 1;
1835 for (int pr = GCAllocPriorityStart; pr < last; ++pr) {
1836 _priority_buffer[pr] = _priority_buffer[pr + 1];
1837 }
1838 _priority_buffer[last] = retired_and_set;
1839 }
1840 };
1841
1842 class G1ParScanThreadState : public StackObj {
1843 protected:
1844 G1CollectedHeap* _g1h;
1845 RefToScanQueue* _refs;
1846 DirtyCardQueue _dcq;
1847 CardTableModRefBS* _ct_bs;
1848 G1RemSet* _g1_rem;
1849
1850 G1MultiParGCAllocBuffer _surviving_alloc_buffer;
1851 G1MultiParGCAllocBuffer _tenured_alloc_buffer;
1852 G1MultiParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1853 ageTable _age_table;
1854
1855 size_t _alloc_buffer_waste;
1856 size_t _undo_waste;
1857
1858 OopsInHeapRegionClosure* _evac_failure_cl;
1859 G1ParScanHeapEvacClosure* _evac_cl;
1860 G1ParScanPartialArrayClosure* _partial_scan_cl;
1861
1862 int _hash_seed;
1863 uint _queue_num;
1864
1865 size_t _term_attempts;
1866
1867 double _start;
1868 double _start_strong_roots;
1869 double _strong_roots_time;
1870 double _start_term;
1871 double _term_time;
1872
1896 // is the to-space, we don't need to include it in the Rset updates.
1897 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1898 size_t card_index = ctbs()->index_for(p);
1899 // If the card hasn't been added to the buffer, do it.
1900 if (ctbs()->mark_card_deferred(card_index)) {
1901 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1902 }
1903 }
1904 }
1905
1906 public:
1907 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1908
1909 ~G1ParScanThreadState() {
1910 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1911 }
1912
1913 RefToScanQueue* refs() { return _refs; }
1914 ageTable* age_table() { return &_age_table; }
1915
1916 G1MultiParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1917 return _alloc_buffers[purpose];
1918 }
1919
1920 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1921 size_t undo_waste() const { return _undo_waste; }
1922
1923 #ifdef ASSERT
1924 bool verify_ref(narrowOop* ref) const;
1925 bool verify_ref(oop* ref) const;
1926 bool verify_task(StarTask ref) const;
1927 #endif // ASSERT
1928
1929 template <class T> void push_on_queue(T* ref) {
1930 assert(verify_ref(ref), "sanity");
1931 refs()->push(ref);
1932 }
1933
  // Records a remembered-set update for the reference at p in region
  // "from": deferred via the dirty card queue when G1DeferredRSUpdate
  // is enabled, otherwise applied immediately.
  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
    if (G1DeferredRSUpdate) {
      deferred_rs_update(from, p, tid);
    } else {
      immediate_rs_update(from, p, tid);
    }
  }
1941
  // Slow-path GC allocation, called when the current PLAB for
  // "purpose" cannot satisfy word_sz.  If the request is small
  // relative to the desired PLAB size, the PLAB is refilled with a
  // freshly allocated buffer and the object is allocated from it;
  // otherwise the object is allocated directly in the heap.  Returns
  // NULL if the heap cannot satisfy the allocation (caller handles
  // the failure).
  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = NULL;
    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
      G1MultiParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);

      // Obtain the replacement buffer first, so a failed heap
      // allocation leaves the existing buffers untouched.
      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
      if (buf == NULL) return NULL; // Let caller handle allocation failure.

      // Count the space left in the buffer about to be retired as
      // waste, then retire it and install the new buffer.
      add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired());
      alloc_buf->retire_and_set_buf(false /* end_of_gc */, false /* retain */,
                                    buf, gclab_word_size);

      obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "buffer was definitely big enough...");
    } else {
      // Large request: allocate it directly, outside any PLAB.
      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
    }
    return obj;
  }
1962
  // Fast-path GC allocation: tries the current PLAB set for "purpose"
  // and falls back to allocate_slow() when it is exhausted.
  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
    if (obj != NULL) return obj;
    return allocate_slow(purpose, word_sz);
  }
1968
1969 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1970 if (alloc_buffer(purpose)->contains(obj)) {
1971 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1972 "should contain whole object");
1973 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
2045 _partial_scan_cl->do_oop_nv(ref_to_scan);
2046 } else {
2047 // Note: we can use "raw" versions of "region_containing" because
2048 // "obj_to_scan" is definitely in the heap, and is not in a
2049 // humongous region.
2050 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2051 _evac_cl->set_region(r);
2052 _evac_cl->do_oop_nv(ref_to_scan);
2053 }
2054 }
2055
  // Dispatches a queued task to the narrow-oop or full-oop handler,
  // according to the encoding recorded in the StarTask.
  void deal_with_reference(StarTask ref) {
    assert(verify_task(ref), "sanity");
    if (ref.is_narrow()) {
      deal_with_reference((narrowOop*)ref);
    } else {
      deal_with_reference((oop*)ref);
    }
  }
2064
2065 void trim_queue();
2066 };
2067
2068 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
|