/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
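  // (Biased-locking state lives in the object's mark word, which marking
  // reuses for mark/forwarding data; the biased marks are saved here and
  // reinstated by BiasedLocking::restore_marks() after compaction.)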
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}

void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
  GenMarkSweep::_preserved_mark_stack = NULL;
  GenMarkSweep::_preserved_oop_stack = NULL;

  GenMarkSweep::_marking_stack =
    new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
  GenMarkSweep::_objarray_stack =
    new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);

  int size = SystemDictionary::number_of_classes() * 2;
  GenMarkSweep::_revisit_klass_stack =
    new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
  // for now until we have a chance to work out a more optimal setting.
  GenMarkSweep::_revisit_mdo_stack =
    new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");
}

// Prepares each region for compaction: live humongous objects are forwarded
// to themselves (they never move), dead humongous regions are freed, and all
// other regions have forwarding pointers installed for their live objects.
class G1PrepareCompactClosure: public HeapRegionClosure {
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;

  void free_humongous_region(HeapRegion* hr) {
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    G1CollectedHeap::heap()->free_region(hr);
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs) :
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold())
  {}
  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead objects may get overwritten by a dead int
  // array.
  // If perm_gen is not traversed last a klassOop may get overwritten.
  // This is fine since it is dead, but if the class has dead instances we
  // have to skip them, and in order to find their size we need the
  // klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion* r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}

class G1AdjustPointersClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}

class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4.
All 367 // classes must be allocated earlier than their instances, and traversing 368 // perm_gen first makes sure that all klassOops have moved to their new 369 // location before any instance does a dispatch through it's klass! 370 371 // The ValidateMarkSweep live oops tracking expects us to traverse spaces 372 // in the same order in phase2, phase3 and phase4. We don't quite do that 373 // here (perm_gen first rather than last), so we tell the validate code 374 // to use a higher index (saved from phase2) when verifying perm_gen. 375 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 376 Generation* pg = g1h->perm_gen(); 377 378 EventMark m("4 compact heap"); 379 TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty); 380 GenMarkSweep::trace("4"); 381 382 pg->compact(); 383 384 G1SpaceCompactClosure blk; 385 g1h->heap_region_iterate(&blk); 386 387 } 388 389 // Local Variables: *** 390 // c-indentation-style: gnu *** 391 // End: ***