/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  HandleMark hm;  // Discard invalid handles created during gc

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
#ifdef ASSERT
  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  // Process roots and do the marking.
  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  // Prepare compaction.
  mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Don't add any more derived pointers during phase3
  DerivedPointerTable::set_active(false);
#endif

  // Adjust all pointers.
  mark_sweep_phase3();

  // Do the actual compaction.
  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Now update the derived pointers.
  DerivedPointerTable::update_pointers();
#endif

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::set_ref_processor(NULL);
}

STWGCTimer* G1MarkSweep::gc_timer() {
  return G1FullGCScope::instance()->timer();
}

SerialOldTracer* G1MarkSweep::gc_tracer() {
  return G1FullGCScope::instance()->tracer();
}

void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    if (ClassUnloading) {
      root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    } else {
      root_processor.process_all_roots_no_string_table(
                                          &GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    }
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());

    // Process reference objects found during marking
    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
    assert(rp == g1h->ref_processor_stw(), "Sanity");

    rp->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(gc_timer(), rp->num_q());

    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&GenMarkSweep::is_alive,
                                          &GenMarkSweep::keep_alive,
                                          &GenMarkSweep::follow_stack_closure,
                                          NULL,
                                          &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive, gc_timer());

    g1h->complete_cleaning(&GenMarkSweep::is_alive, purged_class);
  } else {
    GCTraceTime(Debug, gc, phases) trace("Cleanup", gc_timer());
    g1h->partial_cleaning(&GenMarkSweep::is_alive, true, true, G1StringDedup::is_enabled());
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    g1h->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    GCTraceTime(Info, gc, verify)("During GC (full)");
    g1h->verify(VerifyOption_G1UseMarkWord);
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}


void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());

  prepare_compaction();
}

class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_humongous()) {
      if (r->is_starts_humongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        // point all the oops to the new location
        MarkSweep::adjust_pointers(obj);
      }
    } else if (!r->is_closed_archive()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
                                     &GenMarkSweep::adjust_cld_closure,
                                     &adjust_code_closure);
  }

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
  HeapMonitoring::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
  }

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}

class G1SpaceCompactClosure: public HeapRegionClosure {
 public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      if (hr->is_starts_humongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
      }
      hr->reset_during_compaction();
    } else if (!hr->is_pinned()) {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}

void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
  blk->update_sets();
}

void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
  HeapWord* end = hr->end();
  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

  hr->set_containing_set(NULL);
  _humongous_regions_removed++;

  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
  prepare_for_compaction(hr, end);
  dummy_free_list.remove_all();
}

void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
  // If this is the first live region that we came across which we can compact,
  // initialize the CompactPoint.
  if (!is_cp_initialized()) {
    _cp.space = hr;
    _cp.threshold = hr->initialize_threshold();
  }
  prepare_for_compaction_work(&_cp, hr, end);
}

void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}

void G1PrepareCompactClosure::update_sets() {
  // We'll recalculate total used bytes and recreate the free list
  // at the end of the GC, so no point in updating those values here.
  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
}

bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
  if (hr->is_humongous()) {
    oop obj = oop(hr->humongous_start_region()->bottom());
    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
      // Live humongous objects are never moved; self-forward the object so
      // the adjust and compact phases leave it in place.
      obj->forward_to(obj);
    }
    if (!obj->is_gc_marked()) {
      free_humongous_region(hr);
    }
  } else if (!hr->is_pinned()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;
}