/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::set_ref_processor(rp);
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Don't add any more derived pointers during phase3
  DerivedPointerTable::set_active(false);
#endif

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::set_ref_processor(NULL);
}


void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());

  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    if (ClassUnloading) {
      root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    } else {
      root_processor.process_all_roots_no_string_table(
                                          &GenMarkSweep::follow_root_closure,
                                          &GenMarkSweep::follow_cld_closure,
                                          &follow_code_closure);
    }
  }

  {
    GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());

    // Process reference objects found during marking
    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
    assert(rp == g1h->ref_processor_stw(), "Sanity");

    rp->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
        rp->process_discovered_references(&GenMarkSweep::is_alive,
                                          &GenMarkSweep::keep_alive,
                                          &GenMarkSweep::follow_stack_closure,
                                          NULL,
                                          gc_timer());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive, gc_timer());

    g1h->complete_cleaning(&GenMarkSweep::is_alive, purged_class);
  } else {
    GCTraceTime(Debug, gc, phases) trace("Cleanup", gc_timer());
    g1h->partial_cleaning(&GenMarkSweep::is_alive, true, true, G1StringDedup::is_enabled());
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
    g1h->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    GCTraceTime(Info, gc, verify)("During GC (full)");
    g1h->verify(VerifyOption_G1UseMarkWord);
  }

  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}
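
// Phases 2-4 below together form a classic LISP2-style sliding
// compaction, reusing the serial GenMarkSweep machinery: phase 2
// computes a forwarding address for every live object, phase 3 rewrites
// every reference to use those addresses, and phase 4 actually moves
// the objects.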

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());

  prepare_compaction();
}

class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_humongous()) {
      if (r->is_starts_humongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        // point all the oops to the new location
        MarkSweep::adjust_pointers(obj);
      }
    } else if (!r->is_pinned()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
  {
    G1RootProcessor root_processor(g1h, 1);
    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
                                     &GenMarkSweep::adjust_cld_closure,
                                     &adjust_code_closure);
  }

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);

  if (G1StringDedup::is_enabled()) {
    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
  }

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}

class G1SpaceCompactClosure: public HeapRegionClosure {
 public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      if (hr->is_starts_humongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
      }
      hr->reset_during_compaction();
    } else if (!hr->is_pinned()) {
      hr->compact();
    }
    return false;
  }
};
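
// Note that humongous objects are never moved by this compaction: a live
// humongous object is forwarded to its own address in phase 2 (see
// G1PrepareCompactClosure::doHeapRegion() below) and merely has its mark
// word reset by G1SpaceCompactClosure above, while dead humongous regions
// have already been freed during the prepare-compaction step.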

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the
  // validate code to use a higher index (saved from phase2) when verifying
  // perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}

void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
  blk->update_sets();
}

void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
  HeapWord* end = hr->end();
  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

  hr->set_containing_set(NULL);
  _humongous_regions_removed++;

  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
  prepare_for_compaction(hr, end);
  dummy_free_list.remove_all();
}

void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
  // If this is the first live region that we came across which we can compact,
  // initialize the CompactPoint.
  if (!is_cp_initialized()) {
    _cp.space = hr;
    _cp.threshold = hr->initialize_threshold();
  }
  prepare_for_compaction_work(&_cp, hr, end);
}

void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
                                                          HeapRegion* hr,
                                                          HeapWord* end) {
  hr->prepare_for_compaction(cp);
  // Also clear the part of the card table that will be unused after
  // compaction.
  _mrbs->clear(MemRegion(hr->compaction_top(), end));
}

void G1PrepareCompactClosure::update_sets() {
  // We'll recalculate total used bytes and recreate the free list
  // at the end of the GC, so no point in updating those values here.
  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
}

bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
  if (hr->is_humongous()) {
    oop obj = oop(hr->humongous_start_region()->bottom());
    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
      obj->forward_to(obj);
    }
    if (!obj->is_gc_marked()) {
      free_humongous_region(hr);
    }
  } else if (!hr->is_pinned()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;
}
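
// Usage sketch (illustrative): the asserts in invoke_at_safepoint() imply
// that it runs at a stop-the-world safepoint, on the VM thread, and
// receives the heap's STW reference processor. The exact caller varies
// across JDK versions, but the call has this shape:
//
//   G1MarkSweep::invoke_at_safepoint(G1CollectedHeap::heap()->ref_processor_stw(),
//                                    clear_all_soft_refs);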