/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

bool G1MarkSweep::_archive_check_enabled = false;
G1ArchiveRegionMap G1MarkSweep::_archive_region_map;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

#ifdef ASSERT
  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
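  // (The marks saved here are reinstated by BiasedLocking::restore_marks()
  // once the compaction phases below have completed.)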
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Don't add any more derived pointers during phase3
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::set_ref_processor(NULL);
}


void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", gc_timer());

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
                                        &GenMarkSweep::follow_cld_closure,
                                        &follow_code_closure);
  }

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  assert(rp == g1h->ref_processor_stw(), "Sanity");

  rp->setup_policy(clear_all_softrefs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&GenMarkSweep::is_alive,
                                      &GenMarkSweep::keep_alive,
                                      &GenMarkSweep::follow_stack_closure,
                                      NULL,
                                      gc_timer());
  gc_tracer()->report_gc_reference_stats(stats);


  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);

  // Unload nmethods.
  CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);

  // Delete entries for dead interned strings and clean up unreferenced
  // symbols in the symbol table.
  g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    g1h->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
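    // For that reason the verification below passes
    // VerifyOption_G1UseMarkWord, which determines liveness from the
    // mark word itself rather than from the concurrent-marking bitmaps.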
    GCTraceTime(Info, gc, verify)("During GC (full)");
    g1h->verify(VerifyOption_G1UseMarkWord);
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}


void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", gc_timer());

  prepare_compaction();
}

class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_humongous()) {
      if (r->is_starts_humongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        // point all the oops to the new location
        MarkSweep::adjust_pointers(obj);
      }
    } else if (!r->is_pinned()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", gc_timer());

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
                                     &GenMarkSweep::adjust_cld_closure,
                                     &adjust_code_closure);
  }

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
  }

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}

class G1SpaceCompactClosure: public HeapRegionClosure {
 public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      if (hr->is_starts_humongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
      }
      hr->reset_during_compaction();
    } else if (!hr->is_pinned()) {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
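  // G1SpaceCompactClosure (defined above) performs the actual sliding:
  // live objects in non-pinned regions are copied to their forwarding
  // addresses, while humongous and pinned (e.g. archive) regions stay
  // in place.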
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Trace, gc) tm("Phase 4: Move objects", gc_timer());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}

void G1MarkSweep::enable_archive_object_check() {
  assert(!_archive_check_enabled, "archive range check already enabled");
  _archive_check_enabled = true;
  size_t length = Universe::heap()->max_capacity();
  _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                 (HeapWord*)Universe::heap()->base() + length,
                                 HeapRegion::GrainBytes);
}

void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
  assert(_archive_check_enabled, "archive range check not enabled");
  _archive_region_map.set_by_address(range, is_archive);
}

bool G1MarkSweep::in_archive_range(oop object) {
  // This is the out-of-line part of the is_archive_object test, done separately
  // to avoid additional performance impact when the check is not enabled.
  return _archive_region_map.get_by_address((HeapWord*)object);
}

void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
  blk->update_sets();
}

void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
  HeapWord* end = hr->end();
  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

  hr->set_containing_set(NULL);
  _humongous_regions_removed++;

  _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
  prepare_for_compaction(hr, end);
  dummy_free_list.remove_all();
}

void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
  // If this is the first live region that we came across which we can compact,
  // initialize the CompactPoint.
  if (!is_cp_initialized()) {
    _cp.space = hr;
    _cp.threshold = hr->initialize_threshold();
  }
  prepare_for_compaction_work(&_cp, hr, end);
}

void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}

void G1PrepareCompactClosure::update_sets() {
  // We'll recalculate total used bytes and recreate the free list
  // at the end of the GC, so no point in updating those values here.
  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
}

bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
  if (hr->is_humongous()) {
    oop obj = oop(hr->humongous_start_region()->bottom());
    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
      // A live humongous object is not moved; forward it to itself so
      // that pointer adjustment in phase 3 leaves references unchanged.
      obj->forward_to(obj);
    }
    if (!obj->is_gc_marked()) {
      free_humongous_region(hr);
    }
  } else if (!hr->is_pinned()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;
}