/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1Log.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

bool G1MarkSweep::_archive_check_enabled = false;
G1ArchiveRegionMap G1MarkSweep::_archive_region_map;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

#ifdef ASSERT
  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation, Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
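  // The saved marks are reinstated by BiasedLocking::restore_marks()
  // once all four phases below have completed.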
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Don't add any more derived pointers during phase3
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}

void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
                                        &GenMarkSweep::follow_cld_closure,
                                        &follow_code_closure);
  }

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  assert(rp == g1h->ref_processor_stw(), "Sanity");

  rp->setup_policy(clear_all_softrefs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&GenMarkSweep::is_alive,
                                      &GenMarkSweep::keep_alive,
                                      &GenMarkSweep::follow_stack_closure,
                                      NULL,
                                      gc_timer(),
                                      gc_tracer()->gc_id());
  gc_tracer()->report_gc_reference_stats(stats);

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);

  // Unload nmethods.
  CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);

  // Delete entries for dead interned strings and clean up unreferenced symbols in the symbol table.
  g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    g1h->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOopDesc::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
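    // VerifyOption_G1UseMarkWord, passed to verify() below, directs the
    // verification code to judge liveness from the mark word left by this
    // full GC rather than from the concurrent marking bitmaps, which do
    // not reflect the marking just performed.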
    if (!VerifySilently) {
      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    }
    g1h->verify(VerifySilently, VerifyOption_G1UseMarkWord);
    if (!VerifySilently) {
      gclog_or_tty->print_cr("]");
    }
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());

  prepare_compaction();
}

class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_humongous()) {
      if (r->is_starts_humongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        // point all the oops to the new location
        MarkSweep::adjust_pointers(obj);
      }
    } else if (!r->is_pinned()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

class G1AlwaysTrueClosure: public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return true; }
};
static G1AlwaysTrueClosure always_true;

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
                                     &GenMarkSweep::adjust_cld_closure,
                                     &adjust_code_closure);
  }

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
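  // The always_true closure treats every remaining handle as live: any
  // weak handle whose referent did not survive has already been cleared,
  // so the only work left here is adjusting the surviving pointers.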
  JNIHandles::weak_oops_do(&always_true, &GenMarkSweep::adjust_pointer_closure);

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
  }

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}

class G1SpaceCompactClosure: public HeapRegionClosure {
 public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      if (hr->is_starts_humongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else if (!hr->is_pinned()) {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the
  // validate code to use a higher index (saved from phase2) when verifying
  // perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

void G1MarkSweep::enable_archive_object_check() {
  assert(!_archive_check_enabled, "archive range check already enabled");
  _archive_check_enabled = true;
  size_t length = Universe::heap()->max_capacity();
  _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                 (HeapWord*)Universe::heap()->base() + length,
                                 HeapRegion::GrainBytes);
}

void G1MarkSweep::mark_range_archive(MemRegion range) {
  assert(_archive_check_enabled, "archive range check not enabled");
  _archive_region_map.set_by_address(range, true);
}

bool G1MarkSweep::in_archive_range(oop object) {
  // This is the out-of-line part of the is_archive_object test, done separately
  // to avoid additional performance impact when the check is not enabled.
  return _archive_region_map.get_by_address((HeapWord*)object);
}

void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
  blk->update_sets();
}

void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
  HeapWord* end = hr->end();
  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

  assert(hr->is_starts_humongous(),
         "Only the start of a humongous region should be freed.");

  hr->set_containing_set(NULL);
  _humongous_regions_removed.increment(1u, hr->capacity());

  _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
  prepare_for_compaction(hr, end);
  dummy_free_list.remove_all();
}

void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
  // If this is the first live region that we came across which we can compact,
  // initialize the CompactPoint.
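  // The CompactPoint records the destination region currently being filled
  // and the block-offset-table threshold within it; both are advanced as
  // forwarding addresses are assigned region by region.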
  if (!is_cp_initialized()) {
    _cp.space = hr;
    _cp.threshold = hr->initialize_threshold();
  }
  prepare_for_compaction_work(&_cp, hr, end);
}

void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}

void G1PrepareCompactClosure::update_sets() {
  // We'll recalculate total used bytes and recreate the free list
  // at the end of the GC, so no point in updating those values here.
  HeapRegionSetCount empty_set;
  _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
}

bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
  if (hr->is_humongous()) {
    if (hr->is_starts_humongous()) {
      oop obj = oop(hr->bottom());
      if (obj->is_gc_marked()) {
        obj->forward_to(obj);
      } else {
        free_humongous_region(hr);
      }
    } else {
      assert(hr->is_continues_humongous(), "Invalid humongous.");
    }
  } else if (!hr->is_pinned()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;
}