/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif

void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
                                       bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  _ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  TraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture heap size before collection for printing.
  size_t gch_prev_used = gch->used();

  // Some of the card table updates below assume that the perm gen is
  // also being collected.
  assert(level == gch->n_gens() - 1,
         "All generations are being collected, ergo perm gen too.");

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions(level);

  allocate_stacks();

  // Phase 1: mark all live objects.
  mark_sweep_phase1(level, clear_all_softrefs);

  // Phase 2: compute new addresses for live objects.
  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  // Phase 3: adjust pointers to the new object locations.
  mark_sweep_phase3(level);

  VALIDATE_MARK_SWEEP_ONLY(
    if (ValidateMarkSweep) {
      guarantee(_root_refs_stack->length() == 0, "should be empty by now");
    }
  )

  // Phase 4: move (compact) objects to their new locations.
  mark_sweep_phase4();

  VALIDATE_MARK_SWEEP_ONLY(
    if (ValidateMarkSweep) {
      guarantee(_live_oops->length() == _live_oops_moved_to->length(),
                "should be the same size");
    }
  )

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated all generations younger than this
  // one, then we can clear the card table.  Otherwise, we must invalidate
  // it (consider all cards dirty).  In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  bool all_empty = true;
  for (int i = 0; all_empty && i < level; i++) {
    Generation* g = gch->get_gen(i);
    all_empty = all_empty && g->used() == 0;
  }
  GenRemSet* rs = gch->rem_set();
  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (all_empty) {
    // We've evacuated all generations below us.
    Generation* g = gch->get_gen(level);
    rs->clear_into_younger(g);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region
    // of all generations just collected (i.e. level and younger).
    rs->invalidate_or_clear(gch->get_gen(level),
                            true /* younger */);
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  // refs processing: clean slate
  _ref_processor = NULL;

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);
}

void GenMarkSweep::allocate_stacks() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Scratch request on behalf of oldest generation; will do no
  // allocation.
  ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0);

  // $$$ To cut a corner, we'll only use the first scratch block, and then
  // revert to malloc.
  if (scratch != NULL) {
    _preserved_count_max =
      scratch->num_words * HeapWordSize / sizeof(PreservedMark);
  } else {
    _preserved_count_max = 0;
  }

  _preserved_marks = (PreservedMark*)scratch;
  _preserved_count = 0;

#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
    _root_refs_stack    = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true);
    _other_refs_stack   = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true);
    _adjusted_pointers  = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true);
    _live_oops          = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true);
    _live_oops_moved_to = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true);
    _live_oops_size     = new (ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true);
  }
  if (RecordMarkSweepCompaction) {
    if (_cur_gc_live_oops == NULL) {
      _cur_gc_live_oops           = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
      _cur_gc_live_oops_moved_to  = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
      _cur_gc_live_oops_size      = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true);
      _last_gc_live_oops          = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
      _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true);
      _last_gc_live_oops_size     = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true);
    } else {
      _cur_gc_live_oops->clear();
      _cur_gc_live_oops_moved_to->clear();
      _cur_gc_live_oops_size->clear();
    }
  }
#endif
}


void GenMarkSweep::deallocate_stacks() {
  if (!UseG1GC) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    gch->release_scratch();
  }

  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);

#ifdef VALIDATE_MARK_SWEEP
  if (ValidateMarkSweep) {
    delete _root_refs_stack;
    delete _other_refs_stack;
    delete _adjusted_pointers;
    delete _live_oops;
    delete _live_oops_size;
    delete _live_oops_moved_to;
    _live_oops_index = 0;
    _live_oops_index_at_perm = 0;
  }
#endif
}

void GenMarkSweep::mark_sweep_phase1(int level,
                                     bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  trace(" 1");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking());

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Because follow_root_closure is created statically, we cannot
  // use the OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  follow_root_closure.set_orig_generation(gch->get_gen(level));

  // Need new claim bits before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  gch->gen_process_strong_roots(level,
                                false, // Younger gens are not roots.
                                true,  // activate StrongRootsScope
                                false, // not scavenging
                                SharedHeap::SO_SystemClasses,
                                &follow_root_closure,
                                true,  // walk code active on stacks
                                &follow_root_closure,
                                &follow_klass_closure);

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      &is_alive, &keep_alive, &follow_stack_closure, NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&is_alive);

  // Follow code cache roots
  CodeCache::do_unloading(&is_alive, purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  Klass::clean_weak_klass_links(&is_alive);
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(&is_alive);
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}


void GenMarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a Klass* may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the Klass*!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  trace("2");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking());

  gch->prepare_for_compaction();
}

class GenAdjustPointersClosure: public GenCollectedHeap::GenClosure {
public:
  void do_generation(Generation* gen) {
    gen->adjust_pointers();
  }
};

void GenMarkSweep::mark_sweep_phase3(int level) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  trace("3");

  // Need new claim bits for the pointer adjustment tracing.
  ClassLoaderDataGraph::clear_claimed_marks();

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking());

  // Because the two closures below are created statically, we cannot
  // use the OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level));
  adjust_pointer_closure.set_orig_generation(gch->get_gen(level));

  gch->gen_process_strong_roots(level,
                                false, // Younger gens are not roots.
                                true,  // activate StrongRootsScope
                                false, // not scavenging
                                SharedHeap::SO_AllClasses,
                                &adjust_root_pointer_closure,
                                false, // do not walk code
                                &adjust_root_pointer_closure,
                                &adjust_klass_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
                                                   /*do_marking=*/ false);
  gch->gen_process_weak_roots(&adjust_root_pointer_closure,
                              &adjust_code_pointer_closure,
                              &adjust_pointer_closure);

  adjust_marks();
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
}

class GenCompactClosure: public GenCollectedHeap::GenClosure {
public:
  void do_generation(Generation* gen) {
    gen->compact();
  }
};

void GenMarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all Klass*s have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  trace("4");

  VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking());

  GenCompactClosure blk;
  gch->generation_iterate(&blk, true);

  VALIDATE_MARK_SWEEP_ONLY(compaction_complete());
}