Print this page
rev 2724 : 6484965: G1: piggy-back liveness accounting phase on marking
Summary: Remove the separate counting phase of concurrent marking by tracking the amount of marked bytes and the cards spanned by marked objects in marking task/worker thread local data structures, which are updated as individual objects are marked.
Reviewed-by:
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
+++ new/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
27 27
28 28 class HeapRegion;
29 29 class G1CollectedHeap;
30 30 class G1RemSet;
31 31 class ConcurrentMark;
32 32 class DirtyCardToOopClosure;
33 33 class CMBitMap;
34 34 class CMMarkStack;
35 35 class G1ParScanThreadState;
36 36 class CMTask;
37 37 class ReferenceProcessor;
38 38
39 39 // A class that scans oops in a given heap region (much as OopsInGenClosure
40 40 // scans oops in a generation.)
41 41 class OopsInHeapRegionClosure: public OopsInGenClosure {
42 42 protected:
43 43 HeapRegion* _from;
↓ open down ↓ |
43 lines elided |
↑ open up ↑ |
44 44 public:
45 45 void set_region(HeapRegion* from) { _from = from; }
46 46 };
47 47
48 48 class G1ParClosureSuper : public OopsInHeapRegionClosure {
49 49 protected:
50 50 G1CollectedHeap* _g1;
51 51 G1RemSet* _g1_rem;
52 52 ConcurrentMark* _cm;
53 53 G1ParScanThreadState* _par_scan_state;
54 + int _worker_i;
54 55 bool _during_initial_mark;
55 56 bool _mark_in_progress;
56 57 public:
57 58 G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
58 59 bool apply_to_weak_ref_discovered_field() { return true; }
59 60 };
60 61
61 62 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
62 63 public:
63 64 G1ParPushHeapRSClosure(G1CollectedHeap* g1,
64 65 G1ParScanThreadState* par_scan_state,
65 66 ReferenceProcessor* rp) :
66 67 G1ParClosureSuper(g1, par_scan_state)
67 68 {
68 69 assert(_ref_processor == NULL, "sanity");
69 70 _ref_processor = rp;
70 71 }
71 72
72 73 template <class T> void do_oop_nv(T* p);
73 74 virtual void do_oop(oop* p) { do_oop_nv(p); }
74 75 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
75 76 };
76 77
77 78 class G1ParScanClosure : public G1ParClosureSuper {
78 79 public:
79 80 G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
80 81 G1ParClosureSuper(g1, par_scan_state)
81 82 {
82 83 assert(_ref_processor == NULL, "sanity");
83 84 _ref_processor = rp;
84 85 }
85 86
86 87 template <class T> void do_oop_nv(T* p);
87 88 virtual void do_oop(oop* p) { do_oop_nv(p); }
88 89 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
89 90 };
90 91
// Work-queue entries are pointers; a "partial array" task is encoded by
// setting this low-order tag bit in the pointer value.
#define G1_PARTIAL_ARRAY_MASK 0x2

// Returns true iff ref carries the partial-array tag bit.
// (The mask is a single bit, so testing for non-zero is equivalent to
// comparing the masked value against the mask itself.)
template <class T> inline bool has_partial_array_mask(T* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != 0;
}
96 97
97 98 template <class T> inline T* set_partial_array_mask(T obj) {
98 99 assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
99 100 return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
100 101 }
101 102
102 103 template <class T> inline oop clear_partial_array_mask(T* ref) {
103 104 return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
104 105 }
105 106
106 107 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
107 108 G1ParScanClosure _scanner;
108 109
109 110 public:
110 111 G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
111 112 G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
112 113 {
113 114 assert(_ref_processor == NULL, "sanity");
114 115 }
115 116
116 117 G1ParScanClosure* scanner() {
117 118 return &_scanner;
118 119 }
119 120
120 121 template <class T> void do_oop_nv(T* p);
121 122 virtual void do_oop(oop* p) { do_oop_nv(p); }
122 123 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
123 124 };
124 125
125 126
126 127 class G1ParCopyHelper : public G1ParClosureSuper {
127 128 G1ParScanClosure *_scanner;
128 129 protected:
129 130 template <class T> void mark_object(T* p);
130 131 oop copy_to_survivor_space(oop obj, bool should_mark_root,
131 132 bool should_mark_copy);
132 133 public:
133 134 G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
134 135 G1ParScanClosure *scanner) :
135 136 G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
136 137 };
137 138
138 139 template<bool do_gen_barrier, G1Barrier barrier,
139 140 bool do_mark_object>
140 141 class G1ParCopyClosure : public G1ParCopyHelper {
141 142 G1ParScanClosure _scanner;
142 143
143 144 template <class T> void do_oop_work(T* p);
144 145
145 146 public:
146 147 G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
147 148 ReferenceProcessor* rp) :
148 149 _scanner(g1, par_scan_state, rp),
149 150 G1ParCopyHelper(g1, par_scan_state, &_scanner)
150 151 {
151 152 assert(_ref_processor == NULL, "sanity");
152 153 }
153 154
154 155 G1ParScanClosure* scanner() { return &_scanner; }
155 156
156 157 template <class T> void do_oop_nv(T* p) {
157 158 do_oop_work(p);
158 159 }
159 160 virtual void do_oop(oop* p) { do_oop_nv(p); }
160 161 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
161 162 };
162 163
163 164 typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
164 165 typedef G1ParCopyClosure<true, G1BarrierNone, false> G1ParScanPermClosure;
165 166
166 167 typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
167 168 typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkPermClosure;
168 169
169 170 // The following closure types are no longer used but are retained
170 171 // for historical reasons:
171 172 // typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
172 173 // typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
173 174
174 175 // The following closure type is defined in g1_specialized_oop_closures.hpp:
175 176 //
176 177 // typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
177 178
178 179 // We use a separate closure to handle references during evacuation
179 180 // failure processing.
180 181 // We could have used another instance of G1ParScanHeapEvacClosure
181 182 // (since that closure no longer assumes that the references it
182 183 // handles point into the collection set).
183 184
184 185 typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
185 186
186 187 class FilterIntoCSClosure: public OopClosure {
187 188 G1CollectedHeap* _g1;
188 189 OopClosure* _oc;
189 190 DirtyCardToOopClosure* _dcto_cl;
190 191 public:
191 192 FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl,
192 193 G1CollectedHeap* g1,
193 194 OopClosure* oc,
194 195 ReferenceProcessor* rp) :
195 196 _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
196 197 {
197 198 assert(_ref_processor == NULL, "sanity");
198 199 _ref_processor = rp;
199 200 }
200 201
201 202 template <class T> void do_oop_nv(T* p);
202 203 virtual void do_oop(oop* p) { do_oop_nv(p); }
203 204 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
204 205 bool apply_to_weak_ref_discovered_field() { return true; }
205 206 bool do_header() { return false; }
206 207 };
207 208
208 209 class FilterOutOfRegionClosure: public OopClosure {
209 210 HeapWord* _r_bottom;
210 211 HeapWord* _r_end;
211 212 OopClosure* _oc;
212 213 int _out_of_region;
213 214 public:
214 215 FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
215 216 template <class T> void do_oop_nv(T* p);
216 217 virtual void do_oop(oop* p) { do_oop_nv(p); }
217 218 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
218 219 bool apply_to_weak_ref_discovered_field() { return true; }
219 220 bool do_header() { return false; }
220 221 int out_of_region() { return _out_of_region; }
221 222 };
222 223
223 224 // Closure for iterating over object fields during concurrent marking
224 225 class G1CMOopClosure : public OopClosure {
225 226 G1CollectedHeap* _g1h;
226 227 ConcurrentMark* _cm;
227 228 CMTask* _task;
228 229 public:
229 230 G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
230 231 template <class T> void do_oop_nv(T* p);
231 232 virtual void do_oop( oop* p) { do_oop_nv(p); }
232 233 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
233 234 };
234 235
235 236 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
↓ open down ↓ |
172 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX