/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

bool G1MarkSweep::_archive_check_enabled = false;
G1ArchiveRegionMap G1MarkSweep::_archive_region_map;

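// Serial full collection of the G1 heap. This runs the classic four-phase
// mark-compact algorithm at a safepoint, reusing the serial collector's
// (Gen)MarkSweep infrastructure: mark live objects, compute new addresses,
// adjust pointers, move objects.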
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

#ifdef ASSERT
  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Don't add any more derived pointers during phase3
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::set_ref_processor(NULL);
}

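// G1 does not reserve a dedicated buffer for preserved mark words; with
// _preserved_count_max at zero, GenMarkSweep falls back to its growable
// preserved-mark stacks whenever a mark word needs to be saved.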
void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

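  // Mark everything reachable from the strong roots. Relocations in code
  // blobs are deliberately not fixed up here (note the negated
  // FixRelocations flag); they are corrected later, in phase 3.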
  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
                                        &GenMarkSweep::follow_cld_closure,
                                        &follow_code_closure);
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());

    // Process reference objects found during marking
    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
    assert(rp == g1h->ref_processor_stw(), "Sanity");

    rp->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      rp->process_discovered_references(&GenMarkSweep::is_alive,
                                        &GenMarkSweep::keep_alive,
                                        &GenMarkSweep::follow_stack_closure,
                                        NULL,
                                        gc_timer());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);

    // Unload nmethods.
    CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Scrub String and Symbol Tables", gc_timer());
    // Delete entries for dead interned strings and clean up unreferenced symbols in the symbol table.
    g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
  }

  if (G1StringDedup::is_enabled()) {
    GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", gc_timer());
    G1StringDedup::unlink(&GenMarkSweep::is_alive);
  }

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    g1h->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    // The timer is named so that it scopes over the verification below.
    GCTraceTime(Info, gc, verify) tm_verify("During GC (full)");
    g1h->verify(VerifyOption_G1UseMarkWord);
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());

  prepare_compaction();
}

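// Phase 3 helper: visits every heap region and rewrites each reference in
// live objects to its referent's new (post-compaction) address.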
class G1AdjustPointersClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_humongous()) {
      if (r->is_starts_humongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        // point all the oops to the new location
        MarkSweep::adjust_pointers(obj);
      }
    } else if (!r->is_pinned()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

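  // Unlike the marking in phase 1, which needs only the strong roots, all
  // roots must be visited here: any root that still refers to a surviving
  // object has to be updated to the object's new address.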
  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
                                     &GenMarkSweep::adjust_cld_closure,
                                     &adjust_code_closure);
  }

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
  }

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}

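// Phase 4 helper: visits every heap region and slides live objects in
// compactible regions down to their forwarding addresses.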
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      if (hr->is_starts_humongous()) {
        oop obj = oop(hr->bottom());
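        // A live humongous object is never relocated; its mark word (which
        // holds the self-forwarding pointer installed while preparing for
        // compaction) just needs to be reset to the default value.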
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
      }
      hr->reset_during_compaction();
    } else if (!hr->is_pinned()) {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the
  // validate code to use a higher index (saved from phase2) when verifying
  // perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

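// Support for "archive" regions, e.g. those holding CDS shared strings:
// a heap-region granular map recording which addresses lie in archive
// regions. Such regions are pinned, and the objects in them are never
// marked, forwarded, or moved by this collector.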
void G1MarkSweep::enable_archive_object_check() {
  assert(!_archive_check_enabled, "archive range check already enabled");
  _archive_check_enabled = true;
  size_t length = Universe::heap()->max_capacity();
  _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                 (HeapWord*)Universe::heap()->base() + length,
                                 HeapRegion::GrainBytes);
}

void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
  assert(_archive_check_enabled, "archive range check not enabled");
  _archive_region_map.set_by_address(range, is_archive);
}

bool G1MarkSweep::in_archive_range(oop object) {
  // This is the out-of-line part of the is_archive_object test, done separately
  // to avoid additional performance impact when the check is not enabled.
  return _archive_region_map.get_by_address((HeapWord*)object);
}

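// Phase 2 driver: walks all regions with the given closure, which installs
// forwarding pointers in live objects and frees dead humongous regions,
// then fixes up the region sets.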
void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
  blk->update_sets();
}

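// Free a humongous region whose object has died. The region is released
// through a throwaway free list (the real free list is rebuilt at the end
// of the collection) and its space becomes available as a compaction target.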
void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
  HeapWord* end = hr->end();
  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

  hr->set_containing_set(NULL);
  _humongous_regions_removed++;

  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
  prepare_for_compaction(hr, end);
  dummy_free_list.remove_all();
}

void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
  // If this is the first live region that we came across which we can compact,
  // initialize the CompactPoint.
  if (!is_cp_initialized()) {
    _cp.space = hr;
    _cp.threshold = hr->initialize_threshold();
  }
  prepare_for_compaction_work(&_cp, hr, end);
}

void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}

void G1PrepareCompactClosure::update_sets() {
  // We'll recalculate total used bytes and recreate the free list
  // at the end of the GC, so no point in updating those values here.
  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
}

bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
  if (hr->is_humongous()) {
    oop obj = oop(hr->humongous_start_region()->bottom());
    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
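      // Live humongous objects are never moved: forwarding such an object
      // to itself lets the adjust and compact phases treat it uniformly.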
      obj->forward_to(obj);
    }
    if (!obj->is_gc_marked()) {
      free_humongous_region(hr);
    }
  } else if (!hr->is_pinned()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;
}