rev 2585 : [mq]: g1-reference-processing
--- old/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ new/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/javaClasses.hpp"
27 27 #include "classfile/symbolTable.hpp"
28 28 #include "classfile/systemDictionary.hpp"
29 29 #include "classfile/vmSymbols.hpp"
30 30 #include "code/codeCache.hpp"
31 31 #include "code/icBuffer.hpp"
32 32 #include "gc_implementation/g1/g1MarkSweep.hpp"
33 33 #include "memory/gcLocker.hpp"
34 34 #include "memory/genCollectedHeap.hpp"
35 35 #include "memory/modRefBarrierSet.hpp"
36 36 #include "memory/referencePolicy.hpp"
37 37 #include "memory/space.hpp"
38 38 #include "oops/instanceRefKlass.hpp"
39 39 #include "oops/oop.inline.hpp"
40 40 #include "prims/jvmtiExport.hpp"
41 41 #include "runtime/aprofiler.hpp"
42 42 #include "runtime/biasedLocking.hpp"
43 43 #include "runtime/fprofiler.hpp"
44 44 #include "runtime/synchronizer.hpp"
45 45 #include "runtime/thread.hpp"
46 46 #include "runtime/vmThread.hpp"
47 47 #include "utilities/copy.hpp"
48 48 #include "utilities/events.hpp"
49 49
50 50 class HeapRegion;
51 51
52 52 void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
53 53 bool clear_all_softrefs) {
54 54 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
55 55
56 56 SharedHeap* sh = SharedHeap::heap();
57 57 #ifdef ASSERT
58 58 if (sh->collector_policy()->should_clear_all_soft_refs()) {
59 59 assert(clear_all_softrefs, "Policy should have been checked earlier");
60 60 }
61 61 #endif
62 62 // hook up weak ref data so it can be used during Mark-Sweep
63 63 assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
64 64 assert(rp != NULL, "should be non-NULL");
65 + assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
66 +
65 67 GenMarkSweep::_ref_processor = rp;
66 68 rp->setup_policy(clear_all_softrefs);
67 69
68 70 // When collecting the permanent generation methodOops may be moving,
69 71 // so we either have to flush all bcp data or convert it into bci.
70 72 CodeCache::gc_prologue();
71 73 Threads::gc_prologue();
72 74
73 75 // Increment the invocation count for the permanent generation, since it is
74 76 // implicitly collected whenever we do a full mark sweep collection.
75 77 sh->perm_gen()->stat_record()->invocations++;
76 78
77 79 bool marked_for_unloading = false;
78 80
79 81 allocate_stacks();
80 82
81 83 // We should save the marks of the currently locked biased monitors.
82 84 // The marking doesn't preserve the marks of biased objects.
83 85 BiasedLocking::preserve_marks();
84 86
85 87 mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
86 88
87 89 mark_sweep_phase2();
88 90
89 91 // Don't add any more derived pointers during phase3
90 92 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
91 93
92 94 mark_sweep_phase3();
93 95
94 96 mark_sweep_phase4();
95 97
96 98 GenMarkSweep::restore_marks();
97 99 BiasedLocking::restore_marks();
98 100 GenMarkSweep::deallocate_stacks();
99 101
100 102 // We must invalidate the perm-gen rs, so that it gets rebuilt.
101 103 GenRemSet* rs = sh->rem_set();
102 104 rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);
103 105
104 106 // "free at last gc" is calculated from these.
105 107 // CHF: cheating for now!!!
106 108 // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
107 109 // Universe::set_heap_used_at_last_gc(Universe::heap()->used());
108 110
109 111 Threads::gc_epilogue();
110 112 CodeCache::gc_epilogue();
111 113 JvmtiExport::gc_epilogue();
112 114
113 115 // refs processing: clean slate
114 116 GenMarkSweep::_ref_processor = NULL;
115 117 }
116 118
117 119
118 120 void G1MarkSweep::allocate_stacks() {
119 121 GenMarkSweep::_preserved_count_max = 0;
120 122 GenMarkSweep::_preserved_marks = NULL;
121 123 GenMarkSweep::_preserved_count = 0;
122 124 }
123 125
124 126 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
125 127 bool clear_all_softrefs) {
126 128 // Recursively traverse all live objects and mark them
127 129 EventMark m("1 mark object");
128 130 TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
129 131 GenMarkSweep::trace(" 1");
130 132
131 133 SharedHeap* sh = SharedHeap::heap();
132 134
133 135 sh->process_strong_roots(true, // activate StrongRootsScope
134 136 true, // Collecting permanent generation.
135 137 SharedHeap::SO_SystemClasses,
136 138 &GenMarkSweep::follow_root_closure,
137 139 &GenMarkSweep::follow_code_root_closure,
138 140 &GenMarkSweep::follow_root_closure);
139 141
140 142 // Process reference objects found during marking
141 143 ReferenceProcessor* rp = GenMarkSweep::ref_processor();
144 + assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
145 +
142 146 rp->setup_policy(clear_all_softrefs);
143 147 rp->process_discovered_references(&GenMarkSweep::is_alive,
144 148 &GenMarkSweep::keep_alive,
145 149 &GenMarkSweep::follow_stack_closure,
146 150 NULL);
147 151
148 152 // Follow system dictionary roots and unload classes
149 153 bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
150 154 assert(GenMarkSweep::_marking_stack.is_empty(),
151 155 "stack should be empty by now");
152 156
153 157 // Follow code cache roots (has to be done after system dictionary,
154 158 // assumes all live klasses are marked)
155 159 CodeCache::do_unloading(&GenMarkSweep::is_alive,
156 160 &GenMarkSweep::keep_alive,
157 161 purged_class);
158 162 GenMarkSweep::follow_stack();
159 163
160 164 // Update subklass/sibling/implementor links of live klasses
161 165 GenMarkSweep::follow_weak_klass_links();
162 166 assert(GenMarkSweep::_marking_stack.is_empty(),
163 167 "stack should be empty by now");
164 168
165 169 // Visit memoized MDO's and clear any unmarked weak refs
166 170 GenMarkSweep::follow_mdo_weak_refs();
167 171 assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");
168 172
169 -
170 173 // Visit interned string tables and delete unmarked oops
171 174 StringTable::unlink(&GenMarkSweep::is_alive);
172 175 // Clean up unreferenced symbols in symbol table.
173 176 SymbolTable::unlink();
174 177
175 178 assert(GenMarkSweep::_marking_stack.is_empty(),
176 179 "stack should be empty by now");
177 180
178 181 if (VerifyDuringGC) {
179 182 HandleMark hm; // handle scope
180 183 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
181 184 gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
182 185 Universe::heap()->prepare_for_verify();
183 186 // Note: we can verify only the heap here. When an object is
184 187 // marked, the previous value of the mark word (including
185 188 // identity hash values, ages, etc) is preserved, and the mark
186 189 // word is set to markOop::marked_value - effectively removing
187 190 // any hash values from the mark word. These hash values are
188 191 // used when verifying the dictionaries and so removing them
189 192 // from the mark word can make verification of the dictionaries
190 193 // fail. At the end of the GC, the original mark word values
191 194 // (including hash values) are restored to the appropriate
192 195 // objects.
193 196 Universe::heap()->verify(/* allow dirty */ true,
194 197 /* silent */ false,
195 198 /* option */ VerifyOption_G1UseMarkWord);
196 199
197 200 G1CollectedHeap* g1h = G1CollectedHeap::heap();
198 201 gclog_or_tty->print_cr("]");
199 202 }
200 203 }
201 204
202 205 class G1PrepareCompactClosure: public HeapRegionClosure {
203 206 G1CollectedHeap* _g1h;
204 207 ModRefBarrierSet* _mrbs;
205 208 CompactPoint _cp;
206 209 HumongousRegionSet _humongous_proxy_set;
207 210
208 211 void free_humongous_region(HeapRegion* hr) {
209 212 HeapWord* end = hr->end();
210 213 size_t dummy_pre_used;
211 214 FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
212 215
213 216 assert(hr->startsHumongous(),
214 217 "Only the start of a humongous region should be freed.");
215 218 _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
216 219 &_humongous_proxy_set, false /* par */);
217 220 hr->prepare_for_compaction(&_cp);
218 221 // Also clear the part of the card table that will be unused after
219 222 // compaction.
220 223 _mrbs->clear(MemRegion(hr->compaction_top(), end));
221 224 dummy_free_list.remove_all();
222 225 }
223 226
224 227 public:
225 228 G1PrepareCompactClosure(CompactibleSpace* cs)
226 229 : _g1h(G1CollectedHeap::heap()),
227 230 _mrbs(G1CollectedHeap::heap()->mr_bs()),
228 231 _cp(NULL, cs, cs->initialize_threshold()),
229 232 _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
230 233
231 234 void update_sets() {
232 235 // We'll recalculate total used bytes and recreate the free list
233 236 // at the end of the GC, so no point in updating those values here.
234 237 _g1h->update_sets_after_freeing_regions(0, /* pre_used */
235 238 NULL, /* free_list */
236 239 &_humongous_proxy_set,
237 240 false /* par */);
238 241 }
239 242
240 243 bool doHeapRegion(HeapRegion* hr) {
241 244 if (hr->isHumongous()) {
242 245 if (hr->startsHumongous()) {
243 246 oop obj = oop(hr->bottom());
244 247 if (obj->is_gc_marked()) {
245 248 obj->forward_to(obj);
246 249 } else {
247 250 free_humongous_region(hr);
248 251 }
249 252 } else {
250 253 assert(hr->continuesHumongous(), "Invalid humongous.");
251 254 }
252 255 } else {
253 256 hr->prepare_for_compaction(&_cp);
254 257 // Also clear the part of the card table that will be unused after
255 258 // compaction.
256 259 _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
257 260 }
258 261 return false;
259 262 }
260 263 };
261 264
262 265 // Finds the first HeapRegion.
263 266 class FindFirstRegionClosure: public HeapRegionClosure {
264 267 HeapRegion* _a_region;
265 268 public:
266 269 FindFirstRegionClosure() : _a_region(NULL) {}
267 270 bool doHeapRegion(HeapRegion* r) {
268 271 _a_region = r;
269 272 return true;
270 273 }
271 274 HeapRegion* result() { return _a_region; }
272 275 };
273 276
274 277 void G1MarkSweep::mark_sweep_phase2() {
275 278 // Now all live objects are marked, compute the new object addresses.
276 279
277 280 // It is imperative that we traverse perm_gen LAST. If dead space is
278 281 // allowed, a range of dead objects may get overwritten by a dead int
279 282 // array. If perm_gen is not traversed last a klassOop may get
280 283 // overwritten. This is fine since it is dead, but if the class has dead
281 284 // instances we have to skip them, and in order to find their size we
282 285 // need the klassOop!
283 286 //
284 287 // It is not required that we traverse spaces in the same order in
285 288 // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
286 289 // tracking expects us to do so. See comment under phase4.
287 290
288 291 G1CollectedHeap* g1h = G1CollectedHeap::heap();
289 292 Generation* pg = g1h->perm_gen();
290 293
291 294 EventMark m("2 compute new addresses");
292 295 TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
293 296 GenMarkSweep::trace("2");
294 297
295 298 FindFirstRegionClosure cl;
296 299 g1h->heap_region_iterate(&cl);
297 300 HeapRegion *r = cl.result();
298 301 CompactibleSpace* sp = r;
299 302 if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
300 303 sp = r->next_compaction_space();
301 304 }
302 305
303 306 G1PrepareCompactClosure blk(sp);
304 307 g1h->heap_region_iterate(&blk);
305 308 blk.update_sets();
306 309
307 310 CompactPoint perm_cp(pg, NULL, NULL);
308 311 pg->prepare_for_compaction(&perm_cp);
309 312 }
310 313
311 314 class G1AdjustPointersClosure: public HeapRegionClosure {
312 315 public:
313 316 bool doHeapRegion(HeapRegion* r) {
314 317 if (r->isHumongous()) {
315 318 if (r->startsHumongous()) {
316 319 // We must adjust the pointers on the single H object.
317 320 oop obj = oop(r->bottom());
318 321 debug_only(GenMarkSweep::track_interior_pointers(obj));
319 322 // point all the oops to the new location
320 323 obj->adjust_pointers();
321 324 debug_only(GenMarkSweep::check_interior_pointers());
322 325 }
323 326 } else {
324 327 // This really ought to be "as_CompactibleSpace"...
325 328 r->adjust_pointers();
326 329 }
327 330 return false;
328 331 }
329 332 };
330 333
331 334 void G1MarkSweep::mark_sweep_phase3() {
332 335 G1CollectedHeap* g1h = G1CollectedHeap::heap();
333 336 Generation* pg = g1h->perm_gen();
334 337
335 338 // Adjust the pointers to reflect the new locations
336 339 EventMark m("3 adjust pointers");
337 340 TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
338 341 GenMarkSweep::trace("3");
339 342
340 343 SharedHeap* sh = SharedHeap::heap();
341 344
342 345 sh->process_strong_roots(true, // activate StrongRootsScope
343 346 true, // Collecting permanent generation.
344 347 SharedHeap::SO_AllClasses,
345 348 &GenMarkSweep::adjust_root_pointer_closure,
346 349 NULL, // do not touch code cache here
347 350 &GenMarkSweep::adjust_pointer_closure);
348 351
349 - g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
352 + assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
353 + g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
354 +
355 + // The discovered lists of the CM reference processor were emptied
356 + // at the start of the Full GC but if the sentinel ref was moved
357 + // as part of this Full GC then they need to be adjusted to point
358 + // to the sentinel ref's new location.
359 + g1h->ref_processor_cm()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
350 360
351 361 // Now adjust pointers in remaining weak roots. (All of which should
352 362 // have been cleared if they pointed to non-surviving objects.)
353 363 g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
354 364 &GenMarkSweep::adjust_pointer_closure);
355 365
356 366 GenMarkSweep::adjust_marks();
357 367
358 368 G1AdjustPointersClosure blk;
359 369 g1h->heap_region_iterate(&blk);
360 370 pg->adjust_pointers();
361 371 }
362 372
363 373 class G1SpaceCompactClosure: public HeapRegionClosure {
364 374 public:
365 375 G1SpaceCompactClosure() {}
366 376
367 377 bool doHeapRegion(HeapRegion* hr) {
368 378 if (hr->isHumongous()) {
369 379 if (hr->startsHumongous()) {
370 380 oop obj = oop(hr->bottom());
371 381 if (obj->is_gc_marked()) {
372 382 obj->init_mark();
373 383 } else {
374 384 assert(hr->is_empty(), "Should have been cleared in phase 2.");
375 385 }
376 386 hr->reset_during_compaction();
377 387 }
378 388 } else {
379 389 hr->compact();
380 390 }
381 391 return false;
382 392 }
383 393 };
384 394
385 395 void G1MarkSweep::mark_sweep_phase4() {
386 396 // All pointers are now adjusted, move objects accordingly
387 397
388 398 // It is imperative that we traverse perm_gen first in phase4. All
389 399 // classes must be allocated earlier than their instances, and traversing
390 400 // perm_gen first makes sure that all klassOops have moved to their new
391 401 // location before any instance does a dispatch through its klass!
392 402
393 403 // The ValidateMarkSweep live oops tracking expects us to traverse spaces
394 404 // in the same order in phase2, phase3 and phase4. We don't quite do that
395 405 // here (perm_gen first rather than last), so we tell the validate code
396 406 // to use a higher index (saved from phase2) when verifying perm_gen.
397 407 G1CollectedHeap* g1h = G1CollectedHeap::heap();
398 408 Generation* pg = g1h->perm_gen();
399 409
400 410 EventMark m("4 compact heap");
401 411 TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
402 412 GenMarkSweep::trace("4");
403 413
404 414 pg->compact();
405 415
406 416 G1SpaceCompactClosure blk;
407 417 g1h->heap_region_iterate(&blk);
408 418
409 419 }
410 420
411 421 // Local Variables: ***
412 422 // c-indentation-style: gnu ***
413 423 // End: ***
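
A minimal sketch of the idea behind the new ref_processor_cm()->weak_oops_do() call added in mark_sweep_phase3() above: any oop slot a reference processor still holds across the full GC (such as the head of a discovered list pointing at the sentinel ref) must be run through the adjust-pointers closure once objects have been forwarded, otherwise that processor is left with a stale pointer. This is a self-contained toy, not HotSpot code; every name in it (ToyOop, ToyRefProcessor, adjust_slot) is invented for illustration.

#include <cassert>
#include <cstdio>

// Toy stand-ins; these types and names are invented for illustration.
struct ToyOop {
  ToyOop* forwardee; // new location chosen during compaction, or nullptr
};

struct ToyRefProcessor {
  ToyOop* discovered_list_head; // weak root kept across the full GC

  // Rough analogue of weak_oops_do(): apply a closure to each held oop slot.
  template <typename Closure>
  void weak_oops_do(Closure cl) { cl(&discovered_list_head); }
};

// Rough analogue of the adjust-pointers closure: follow the forwarding pointer.
static void adjust_slot(ToyOop** slot) {
  if (*slot != nullptr && (*slot)->forwardee != nullptr) {
    *slot = (*slot)->forwardee;
  }
}

int main() {
  ToyOop new_sentinel{nullptr};
  ToyOop old_sentinel{&new_sentinel}; // the sentinel is about to move

  ToyRefProcessor stw_rp{&old_sentinel};
  ToyRefProcessor cm_rp{&old_sentinel};

  // Mirrors the two weak_oops_do() calls in mark_sweep_phase3(): if only
  // the STW processor were visited, cm_rp would keep a stale pointer.
  stw_rp.weak_oops_do(adjust_slot);
  cm_rp.weak_oops_do(adjust_slot);

  assert(stw_rp.discovered_list_head == &new_sentinel);
  assert(cm_rp.discovered_list_head == &new_sentinel);
  printf("both discovered-list heads follow the moved sentinel\n");
  return 0;
}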