Print this page
rev 2585 : [mq]: g1-reference-processing
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/gc_implementation/g1/satbQueue.cpp
+++ new/src/share/vm/gc_implementation/g1/satbQueue.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
27 27 #include "gc_implementation/g1/satbQueue.hpp"
28 28 #include "memory/allocation.inline.hpp"
29 29 #include "memory/sharedHeap.hpp"
30 30 #include "runtime/mutexLocker.hpp"
31 31 #include "runtime/thread.hpp"
32 +#include "runtime/vmThread.hpp"
32 33
33 34 // This method removes entries from an SATB buffer that will not be
34 35 // useful to the concurrent marking threads. An entry is removed if it
35 36 // satisfies one of the following conditions:
36 37 //
37 38 // * it points to an object outside the G1 heap (G1's concurrent
38 39 // marking only visits objects inside the G1 heap),
39 40 // * it points to an object that has been allocated since marking
40 41 // started (according to SATB those objects do not need to be
41 42 // visited during marking), or
42 43 // * it points to an object that has already been marked (no need to
43 44 // process it again).
44 45 //
45 46 // The rest of the entries will be retained and are compacted towards
46 47 // the top of the buffer. If with this filtering we clear a large
47 48 // enough chunk of the buffer we can re-use it (instead of enqueueing
48 49 // it) and we can just allow the mutator to carry on executing.
49 50
50 51 bool ObjPtrQueue::should_enqueue_buffer() {
51 52 assert(_lock == NULL || _lock->owned_by_self(),
52 53 "we should have taken the lock before calling this");
53 54
54 55 // A value of 0 means "don't filter SATB buffers".
55 56 if (G1SATBBufferEnqueueingThresholdPercent == 0) {
56 57 return true;
57 58 }
58 59
59 60 G1CollectedHeap* g1h = G1CollectedHeap::heap();
60 61
61 62 // This method should only be called if there is a non-NULL buffer
62 63 // that is full.
63 64 assert(_index == 0, "pre-condition");
64 65 assert(_buf != NULL, "pre-condition");
65 66
66 67 void** buf = _buf;
67 68 size_t sz = _sz;
68 69
69 70 // Used for sanity checking at the end of the loop.
70 71 debug_only(size_t entries = 0; size_t retained = 0;)
71 72
72 73 size_t i = sz;
73 74 size_t new_index = sz;
74 75
75 76 // Given that we are expecting _index == 0, we could have changed
76 77 // the loop condition to (i > 0). But we are using _index for
77 78 // generality.
78 79 while (i > _index) {
79 80 assert(i > 0, "we should have at least one more entry to process");
80 81 i -= oopSize;
81 82 debug_only(entries += 1;)
82 83 oop* p = (oop*) &buf[byte_index_to_index((int) i)];
83 84 oop obj = *p;
84 85 // NULL the entry so that unused parts of the buffer contain NULLs
85 86 // at the end. If we are going to retain it we will copy it to its
86 87 // final place. If we have retained all entries we have visited so
87 88 // far, we'll just end up copying it to the same place.
88 89 *p = NULL;
89 90
90 91 bool retain = g1h->is_obj_ill(obj);
91 92 if (retain) {
92 93 assert(new_index > 0, "we should not have already filled up the buffer");
93 94 new_index -= oopSize;
94 95 assert(new_index >= i,
95 96 "new_index should never be below i, as we alwaysr compact 'up'");
96 97 oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
97 98 assert(new_p >= p, "the destination location should never be below "
98 99 "the source as we always compact 'up'");
99 100 assert(*new_p == NULL,
100 101 "we should have already cleared the destination location");
101 102 *new_p = obj;
102 103 debug_only(retained += 1;)
103 104 }
104 105 }
105 106 size_t entries_calc = (sz - _index) / oopSize;
106 107 assert(entries == entries_calc, "the number of entries we counted "
107 108 "should match the number of entries we calculated");
108 109 size_t retained_calc = (sz - new_index) / oopSize;
109 110 assert(retained == retained_calc, "the number of retained entries we counted "
110 111 "should match the number of retained entries we calculated");
111 112 size_t perc = retained_calc * 100 / entries_calc;
112 113 bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
113 114 _index = new_index;
114 115
115 116 return should_enqueue;
116 117 }
117 118
118 119 void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
119 120 if (_buf != NULL) {
120 121 apply_closure_to_buffer(cl, _buf, _index, _sz);
121 122 _index = _sz;
122 123 }
123 124 }
124 125
125 126 void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
126 127 void** buf, size_t index, size_t sz) {
127 128 if (cl == NULL) return;
128 129 for (size_t i = index; i < sz; i += oopSize) {
129 130 oop obj = (oop)buf[byte_index_to_index((int)i)];
130 131 // There can be NULL entries because of destructors.
131 132 if (obj != NULL) {
132 133 cl->do_object(obj);
133 134 }
134 135 }
135 136 }
136 137
#ifdef ASSERT
// Debug-only sanity check: every live entry in the buffer must be a
// valid oop. The mark word is ignored because concurrent marking may
// be using it.
void ObjPtrQueue::verify_oops_in_buffer() {
  if (_buf == NULL) {
    return;
  }
  for (size_t cur = _index; cur < _sz; cur += oopSize) {
    oop entry = (oop) _buf[byte_index_to_index((int) cur)];
    assert(entry != NULL && entry->is_oop(true /* ignore mark word */),
           "Not an oop");
  }
}
#endif
147 148
148 149 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
149 150 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
150 151 #endif // _MSC_VER
151 152
152 153
153 154 SATBMarkQueueSet::SATBMarkQueueSet() :
154 155 PtrQueueSet(),
155 156 _closure(NULL), _par_closures(NULL),
156 157 _shared_satb_queue(this, true /*perm*/)
157 158 {}
158 159
159 160 void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
160 161 int process_completed_threshold,
161 162 Mutex* lock) {
162 163 PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1);
163 164 _shared_satb_queue.set_lock(lock);
164 165 if (ParallelGCThreads > 0) {
165 166 _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads);
166 167 }
167 168 }
168 169
169 170
// Called when thread t's SATB queue buffer has filled up (its index
// reached zero). Delegates to the queue's own zero-index handling;
// in debug builds, first verifies the buffer holds only valid oops.
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
  DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
  t->satb_mark_queue().handle_zero_index();
}
174 175
#ifdef ASSERT
// Debug aid: print the active flag of the queue set, the expected
// value, and the active flag of every Java thread's SATB queue,
// to help diagnose an unexpected activation state.
void SATBMarkQueueSet::dump_active_values(JavaThread* first,
                                          bool expected_active) {
  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
  gclog_or_tty->print_cr(" SATB queue set: active is %s",
                         is_active() ? "TRUE" : "FALSE");
  gclog_or_tty->print_cr(" expected_active is %s",
                         expected_active ? "TRUE" : "FALSE");
  for (JavaThread* t = first; t != NULL; t = t->next()) {
    bool queue_active = t->satb_mark_queue().is_active();
    gclog_or_tty->print_cr(" thread %s, active is %s",
                           t->name(), queue_active ? "TRUE" : "FALSE");
  }
}
#endif // ASSERT
190 191
// Set the active flag of the queue set and of every Java thread's
// SATB queue to b. Must run at a safepoint so the thread list and the
// per-thread queues can be manipulated safely. expected_active is the
// value every flag should currently hold; in debug builds a mismatch
// indicates marking-cycle bookkeeping has gone wrong and is fatal.
void SATBMarkQueueSet::set_active_all_threads(bool b,
                                              bool expected_active) {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  JavaThread* first = Threads::first();

#ifdef ASSERT
  // Verify the queue set's own flag before flipping it.
  if (_all_active != expected_active) {
    dump_active_values(first, expected_active);

    // I leave this here as a guarantee, instead of an assert, so
    // that it will still be compiled in if we choose to uncomment
    // the #ifdef ASSERT in a product build. The whole block is
    // within an #ifdef ASSERT so the guarantee will not be compiled
    // in a product build anyway.
    guarantee(false,
              "SATB queue set has an unexpected active value");
  }
#endif // ASSERT
  _all_active = b;

  for (JavaThread* t = first; t; t = t->next()) {
#ifdef ASSERT
    // Verify each thread's queue agrees with the expected state
    // before flipping it.
    bool active = t->satb_mark_queue().is_active();
    if (active != expected_active) {
      dump_active_values(first, expected_active);

      // I leave this here as a guarantee, instead of an assert, so
      // that it will still be compiled in if we choose to uncomment
      // the #ifdef ASSERT in a product build. The whole block is
      // within an #ifdef ASSERT so the guarantee will not be compiled
      // in a product build anyway.
      guarantee(false,
                "thread has an unexpected active value in its SATB queue");
    }
#endif // ASSERT
    t->satb_mark_queue().set_active(b);
  }
}
229 230
// Install the closure applied by the serial iteration paths
// (iterate_closure_all_threads and the non-parallel completed-buffer
// processing).
void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
  _closure = closure;
}
233 234
// Install the closure used by parallel worker i. Only valid when the
// per-worker closure array was allocated (i.e. ParallelGCThreads > 0).
void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
  assert(ParallelGCThreads > 0 && _par_closures != NULL, "Precondition");
  _par_closures[i] = par_closure;
}
238 239
239 240 void SATBMarkQueueSet::iterate_closure_all_threads() {
240 241 for(JavaThread* t = Threads::first(); t; t = t->next()) {
241 242 t->satb_mark_queue().apply_closure(_closure);
242 243 }
243 244 shared_satb_queue()->apply_closure(_closure);
244 245 }
↓ open down ↓ |
203 lines elided |
↑ open up ↑ |
245 246
// Parallel variant: each worker claims Java threads via the
// strong-roots parity protocol and applies its per-worker closure to
// the SATB queues of the threads it wins.
void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
  SharedHeap* sh = SharedHeap::heap();
  int parity = sh->strong_roots_parity();

  for(JavaThread* t = Threads::first(); t; t = t->next()) {
    if (t->claim_oops_do(true, parity)) {
      t->satb_mark_queue().apply_closure(_par_closures[worker]);
    }
  }

  // We also need to claim the VMThread so that its parity is updated;
  // otherwise the next call to Thread::possibly_parallel_oops_do inside
  // a StrongRootsScope might skip the VMThread because it has a stale
  // parity that matches the parity set by the StrongRootsScope.
  //
  // Whichever worker succeeds in claiming the VMThread gets to do
  // the shared queue.

  VMThread* vmt = VMThread::vm_thread();
  if (vmt->claim_oops_do(true, parity)) {
    shared_satb_queue()->apply_closure(_par_closures[worker]);
  }
}
260 270
261 271 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
262 272 int worker) {
263 273 BufferNode* nd = NULL;
264 274 {
265 275 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
266 276 if (_completed_buffers_head != NULL) {
267 277 nd = _completed_buffers_head;
268 278 _completed_buffers_head = nd->next();
269 279 if (_completed_buffers_head == NULL) _completed_buffers_tail = NULL;
270 280 _n_completed_buffers--;
271 281 if (_n_completed_buffers == 0) _process_completed = false;
272 282 }
273 283 }
274 284 ObjectClosure* cl = (par ? _par_closures[worker] : _closure);
275 285 if (nd != NULL) {
276 286 void **buf = BufferNode::make_buffer_from_node(nd);
277 287 ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
278 288 deallocate_buffer(buf);
279 289 return true;
280 290 } else {
281 291 return false;
282 292 }
283 293 }
284 294
// Discard all SATB state accumulated so far: free every completed
// buffer and reset every thread's queue plus the shared queue.
// Used when a concurrent marking cycle is being abandoned.
void SATBMarkQueueSet::abandon_partial_marking() {
  BufferNode* buffers_to_delete = NULL;
  {
    // Unlink the whole completed-buffer list under the lock; the
    // actual deallocation happens outside the critical section.
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _completed_buffers_tail = NULL;
    _n_completed_buffers = 0;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }
  // NOTE(review): this precondition arguably belongs at function
  // entry; the buffer-list manipulation above is lock-protected.
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  // So we can safely manipulate these queues.
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    t->satb_mark_queue().reset();
  }
  shared_satb_queue()->reset();
}
↓ open down ↓ |
43 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX