/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.hpp" 27 #include "classfile/symbolTable.hpp" 28 #include "classfile/systemDictionary.hpp" 29 #include "classfile/vmSymbols.hpp" 30 #include "code/codeCache.hpp" 31 #include "code/icBuffer.hpp" 32 #include "gc_implementation/shared/gcHeapSummary.hpp" 33 #include "gc_implementation/shared/gcTimer.hpp" 34 #include "gc_implementation/shared/gcTrace.hpp" 35 #include "gc_implementation/shared/gcTraceTime.hpp" 36 #include "gc_interface/collectedHeap.inline.hpp" 37 #include "memory/genCollectedHeap.hpp" 38 #include "memory/genMarkSweep.hpp" 39 #include "memory/genOopClosures.inline.hpp" 40 #include "memory/generation.inline.hpp" 41 #include "memory/modRefBarrierSet.hpp" 42 #include "memory/referencePolicy.hpp" 43 #include "memory/space.hpp" 44 #include "oops/instanceRefKlass.hpp" 45 #include "oops/oop.inline.hpp" 46 #include "prims/jvmtiExport.hpp" 47 #include "runtime/fprofiler.hpp" 48 #include "runtime/handles.inline.hpp" 49 #include "runtime/synchronizer.hpp" 50 #include "runtime/vmThread.hpp" 51 #include "utilities/copy.hpp" 52 #include "utilities/events.hpp" 53 #ifdef TARGET_OS_FAMILY_linux 54 # include "thread_linux.inline.hpp" 55 #endif 56 #ifdef TARGET_OS_FAMILY_solaris 57 # include "thread_solaris.inline.hpp" 58 #endif 59 #ifdef TARGET_OS_FAMILY_windows 60 # include "thread_windows.inline.hpp" 61 #endif 62 #ifdef TARGET_OS_FAMILY_bsd 63 # include "thread_bsd.inline.hpp" 64 #endif 65 66 void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, 67 bool clear_all_softrefs) { 68 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); 69 70 GenCollectedHeap* gch = GenCollectedHeap::heap(); 71 #ifdef ASSERT 72 if (gch->collector_policy()->should_clear_all_soft_refs()) { 73 assert(clear_all_softrefs, "Policy should have been checked earlier"); 74 } 75 #endif 76 77 // hook up weak ref data so it can be used during Mark-Sweep 78 assert(ref_processor() == NULL, 
"no stomping"); 79 assert(rp != NULL, "should be non-NULL"); 80 _ref_processor = rp; 81 rp->setup_policy(clear_all_softrefs); 82 83 GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); 84 85 gch->trace_heap_before_gc(_gc_tracer); 86 87 // When collecting the permanent generation methodOops may be moving, 88 // so we either have to flush all bcp data or convert it into bci. 89 CodeCache::gc_prologue(); 90 Threads::gc_prologue(); 91 92 // Increment the invocation count for the permanent generation, since it is 93 // implicitly collected whenever we do a full mark sweep collection. 94 gch->perm_gen()->stat_record()->invocations++; 95 96 // Capture heap size before collection for printing. 97 size_t gch_prev_used = gch->used(); 98 99 // Some of the card table updates below assume that the perm gen is 100 // also being collected. 101 assert(level == gch->n_gens() - 1, 102 "All generations are being collected, ergo perm gen too."); 103 104 // Capture used regions for each generation that will be 105 // subject to collection, so that card table adjustments can 106 // be made intelligently (see clear / invalidate further below). 
107 gch->save_used_regions(level, true /* perm */); 108 109 allocate_stacks(); 110 111 mark_sweep_phase1(level, clear_all_softrefs); 112 113 mark_sweep_phase2(); 114 115 // Don't add any more derived pointers during phase3 116 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); 117 COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); 118 119 mark_sweep_phase3(level); 120 121 VALIDATE_MARK_SWEEP_ONLY( 122 if (ValidateMarkSweep) { 123 guarantee(_root_refs_stack->length() == 0, "should be empty by now"); 124 } 125 ) 126 127 mark_sweep_phase4(); 128 129 VALIDATE_MARK_SWEEP_ONLY( 130 if (ValidateMarkSweep) { 131 guarantee(_live_oops->length() == _live_oops_moved_to->length(), 132 "should be the same size"); 133 } 134 ) 135 136 restore_marks(); 137 138 // Set saved marks for allocation profiler (and other things? -- dld) 139 // (Should this be in general part?) 140 gch->save_marks(); 141 142 deallocate_stacks(); 143 144 // If compaction completely evacuated all generations younger than this 145 // one, then we can clear the card table. Otherwise, we must invalidate 146 // it (consider all cards dirty). In the future, we might consider doing 147 // compaction within generations only, and doing card-table sliding. 148 bool all_empty = true; 149 for (int i = 0; all_empty && i < level; i++) { 150 Generation* g = gch->get_gen(i); 151 all_empty = all_empty && gch->get_gen(i)->used() == 0; 152 } 153 GenRemSet* rs = gch->rem_set(); 154 // Clear/invalidate below make use of the "prev_used_regions" saved earlier. 155 if (all_empty) { 156 // We've evacuated all generations below us. 157 Generation* g = gch->get_gen(level); 158 rs->clear_into_younger(g, true /* perm */); 159 } else { 160 // Invalidate the cards corresponding to the currently used 161 // region and clear those corresponding to the evacuated region 162 // of all generations just collected (i.e. level and younger). 
163 rs->invalidate_or_clear(gch->get_gen(level), 164 true /* younger */, 165 true /* perm */); 166 } 167 168 Threads::gc_epilogue(); 169 CodeCache::gc_epilogue(); 170 JvmtiExport::gc_epilogue(); 171 172 if (PrintGC && !PrintGCDetails) { 173 gch->print_heap_change(gch_prev_used); 174 } 175 176 // refs processing: clean slate 177 _ref_processor = NULL; 178 179 // Update heap occupancy information which is used as 180 // input to soft ref clearing policy at the next gc. 181 Universe::update_heap_info_at_gc(); 182 183 // Update time of last gc for all generations we collected 184 // (which curently is all the generations in the heap). 185 // We need to use a monotonically non-deccreasing time in ms 186 // or we will see time-warp warnings and os::javaTimeMillis() 187 // does not guarantee monotonicity. 188 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 189 gch->update_time_of_last_gc(now); 190 191 gch->trace_heap_after_gc(_gc_tracer); 192 } 193 194 void GenMarkSweep::allocate_stacks() { 195 GenCollectedHeap* gch = GenCollectedHeap::heap(); 196 // Scratch request on behalf of oldest generation; will do no 197 // allocation. 198 ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0); 199 200 // $$$ To cut a corner, we'll only use the first scratch block, and then 201 // revert to malloc. 
202 if (scratch != NULL) { 203 _preserved_count_max = 204 scratch->num_words * HeapWordSize / sizeof(PreservedMark); 205 } else { 206 _preserved_count_max = 0; 207 } 208 209 _preserved_marks = (PreservedMark*)scratch; 210 _preserved_count = 0; 211 212 #ifdef VALIDATE_MARK_SWEEP 213 if (ValidateMarkSweep) { 214 _root_refs_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true); 215 _other_refs_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true); 216 _adjusted_pointers = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true); 217 _live_oops = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true); 218 _live_oops_moved_to = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true); 219 _live_oops_size = new (ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true); 220 } 221 if (RecordMarkSweepCompaction) { 222 if (_cur_gc_live_oops == NULL) { 223 _cur_gc_live_oops = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); 224 _cur_gc_live_oops_moved_to = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); 225 _cur_gc_live_oops_size = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true); 226 _last_gc_live_oops = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); 227 _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); 228 _last_gc_live_oops_size = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true); 229 } else { 230 _cur_gc_live_oops->clear(); 231 _cur_gc_live_oops_moved_to->clear(); 232 _cur_gc_live_oops_size->clear(); 233 } 234 } 235 #endif 236 } 237 238 239 void GenMarkSweep::deallocate_stacks() { 240 if (!UseG1GC) { 241 GenCollectedHeap* gch = GenCollectedHeap::heap(); 242 gch->release_scratch(); 243 } 244 245 _preserved_mark_stack.clear(true); 246 _preserved_oop_stack.clear(true); 247 _marking_stack.clear(); 248 _objarray_stack.clear(true); 249 _revisit_klass_stack.clear(true); 250 
_revisit_mdo_stack.clear(true); 251 252 #ifdef VALIDATE_MARK_SWEEP 253 if (ValidateMarkSweep) { 254 delete _root_refs_stack; 255 delete _other_refs_stack; 256 delete _adjusted_pointers; 257 delete _live_oops; 258 delete _live_oops_size; 259 delete _live_oops_moved_to; 260 _live_oops_index = 0; 261 _live_oops_index_at_perm = 0; 262 } 263 #endif 264 } 265 266 void GenMarkSweep::mark_sweep_phase1(int level, 267 bool clear_all_softrefs) { 268 // Recursively traverse all live objects and mark them 269 GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer); 270 trace(" 1"); 271 272 VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false)); 273 274 GenCollectedHeap* gch = GenCollectedHeap::heap(); 275 276 // Because follow_root_closure is created statically, cannot 277 // use OopsInGenClosure constructor which takes a generation, 278 // as the Universe has not been created when the static constructors 279 // are run. 280 follow_root_closure.set_orig_generation(gch->get_gen(level)); 281 282 gch->gen_process_strong_roots(level, 283 false, // Younger gens are not roots. 284 true, // activate StrongRootsScope 285 true, // Collecting permanent generation. 
286 SharedHeap::SO_SystemClasses, 287 &follow_root_closure, 288 true, // walk code active on stacks 289 &follow_root_closure); 290 291 // Process reference objects found during marking 292 { 293 ref_processor()->setup_policy(clear_all_softrefs); 294 const ReferenceProcessorStats& stats = 295 ref_processor()->process_discovered_references( 296 &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer); 297 gc_tracer()->report_gc_reference_stats(stats); 298 } 299 300 // Follow system dictionary roots and unload classes 301 bool purged_class = SystemDictionary::do_unloading(&is_alive); 302 303 // Follow code cache roots 304 CodeCache::do_unloading(&is_alive, &keep_alive, purged_class); 305 follow_stack(); // Flush marking stack 306 307 // Update subklass/sibling/implementor links of live klasses 308 follow_weak_klass_links(); 309 assert(_marking_stack.is_empty(), "just drained"); 310 311 // Visit memoized MDO's and clear any unmarked weak refs 312 follow_mdo_weak_refs(); 313 assert(_marking_stack.is_empty(), "just drained"); 314 315 // Visit interned string tables and delete unmarked oops 316 StringTable::unlink(&is_alive); 317 // Clean up unreferenced symbols in symbol table. 318 SymbolTable::unlink(); 319 320 assert(_marking_stack.is_empty(), "stack should be empty by now"); 321 } 322 323 324 void GenMarkSweep::mark_sweep_phase2() { 325 // Now all live objects are marked, compute the new object addresses. 326 327 // It is imperative that we traverse perm_gen LAST. If dead space is 328 // allowed a range of dead object may get overwritten by a dead int 329 // array. If perm_gen is not traversed last a klassOop may get 330 // overwritten. This is fine since it is dead, but if the class has dead 331 // instances we have to skip them, and in order to find their size we 332 // need the klassOop! 
333 // 334 // It is not required that we traverse spaces in the same order in 335 // phase2, phase3 and phase4, but the ValidateMarkSweep live oops 336 // tracking expects us to do so. See comment under phase4. 337 338 GenCollectedHeap* gch = GenCollectedHeap::heap(); 339 Generation* pg = gch->perm_gen(); 340 341 GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer); 342 trace("2"); 343 344 VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false)); 345 346 gch->prepare_for_compaction(); 347 348 VALIDATE_MARK_SWEEP_ONLY(_live_oops_index_at_perm = _live_oops_index); 349 CompactPoint perm_cp(pg, NULL, NULL); 350 pg->prepare_for_compaction(&perm_cp); 351 } 352 353 class GenAdjustPointersClosure: public GenCollectedHeap::GenClosure { 354 public: 355 void do_generation(Generation* gen) { 356 gen->adjust_pointers(); 357 } 358 }; 359 360 void GenMarkSweep::mark_sweep_phase3(int level) { 361 GenCollectedHeap* gch = GenCollectedHeap::heap(); 362 Generation* pg = gch->perm_gen(); 363 364 // Adjust the pointers to reflect the new locations 365 GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer); 366 trace("3"); 367 368 VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false)); 369 370 // Needs to be done before the system dictionary is adjusted. 371 pg->pre_adjust_pointers(); 372 373 // Because the two closures below are created statically, cannot 374 // use OopsInGenClosure constructor which takes a generation, 375 // as the Universe has not been created when the static constructors 376 // are run. 377 adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level)); 378 adjust_pointer_closure.set_orig_generation(gch->get_gen(level)); 379 380 gch->gen_process_strong_roots(level, 381 false, // Younger gens are not roots. 382 true, // activate StrongRootsScope 383 true, // Collecting permanent generation. 
384 SharedHeap::SO_AllClasses, 385 &adjust_root_pointer_closure, 386 false, // do not walk code 387 &adjust_root_pointer_closure); 388 389 // Now adjust pointers in remaining weak roots. (All of which should 390 // have been cleared if they pointed to non-surviving objects.) 391 CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure, 392 /*do_marking=*/ false); 393 gch->gen_process_weak_roots(&adjust_root_pointer_closure, 394 &adjust_code_pointer_closure, 395 &adjust_pointer_closure); 396 397 adjust_marks(); 398 GenAdjustPointersClosure blk; 399 gch->generation_iterate(&blk, true); 400 pg->adjust_pointers(); 401 } 402 403 class GenCompactClosure: public GenCollectedHeap::GenClosure { 404 public: 405 void do_generation(Generation* gen) { 406 gen->compact(); 407 } 408 }; 409 410 void GenMarkSweep::mark_sweep_phase4() { 411 // All pointers are now adjusted, move objects accordingly 412 413 // It is imperative that we traverse perm_gen first in phase4. All 414 // classes must be allocated earlier than their instances, and traversing 415 // perm_gen first makes sure that all klassOops have moved to their new 416 // location before any instance does a dispatch through it's klass! 417 418 // The ValidateMarkSweep live oops tracking expects us to traverse spaces 419 // in the same order in phase2, phase3 and phase4. We don't quite do that 420 // here (perm_gen first rather than last), so we tell the validate code 421 // to use a higher index (saved from phase2) when verifying perm_gen. 
422 GenCollectedHeap* gch = GenCollectedHeap::heap(); 423 Generation* pg = gch->perm_gen(); 424 425 GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer); 426 trace("4"); 427 428 VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(true)); 429 430 pg->compact(); 431 432 VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false)); 433 434 GenCompactClosure blk; 435 gch->generation_iterate(&blk, true); 436 437 VALIDATE_MARK_SWEEP_ONLY(compaction_complete()); 438 439 pg->post_compact(); // Shared spaces verification. 440 }