/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
unsigned int        PSMarkSweep::_total_invocations = 0;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before the full gc, or perform any other specialized
// behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy may be true
// because this method can be called without intervening activity. For
// example, when heap space is tight and full measures are being taken to
// free space.
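
// Informal illustration (added commentary, not part of the original source):
// invoke() is the policy-aware entry point and is reached from a VM operation
// running on the VM thread at a safepoint, along these lines:
//
//   void VM_ParallelGCSystemGC::doit() {
//     // ... heap-kind and safepoint checks elided ...
//     PSMarkSweep::invoke(false /* maximum_heap_compaction */);
//   }
//
// The exact caller depends on the GC cause and on flags such as
// UseParallelOldGC (which selects PSParallelCompact instead); this sketch is
// descriptive only.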

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
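
// Added note (commentary, not part of the original source): IntFlagSetting
// above is an RAII helper, so MarkSweepAlwaysCompactCount is only overridden
// for the duration of invoke() and is restored when flag_setting goes out of
// scope. Forcing the count to 1 on a maximal compaction request makes the
// PSMarkSweepDecorator logic treat this collection as one that must compact
// with no dead space left behind; otherwise only every
// MarkSweepAlwaysCompactCount-th full GC compacts that aggressively.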

// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output that the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);
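
    // Added summary (commentary, not part of the original source): the four
    // calls below form a classic sliding mark-compact collection:
    //   phase 1 -- mark every object transitively reachable from the roots;
    //   phase 2 -- compute each live object's post-compaction address;
    //   phase 3 -- rewrite all roots and object fields to the new addresses;
    //   phase 4 -- slide the live objects to their new locations.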

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information, which is used as input to the soft
    // ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      // If nothing survives in the young gen there can be no old-to-young
      // pointers, so the card table can simply be cleared; otherwise it is
      // conservatively invalidated (all cards dirtied) because compaction
      // may have created new old-to-young pointers.
      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
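
    // Illustrative arithmetic (added commentary, not part of the original
    // source): in the adaptive-size block below, max_eden_size is the young
    // gen's maximum size minus both survivor spaces. For example, a 64M
    // young gen with 8M from- and to-spaces yields
    // max_eden_size = 64M - 8M - 8M = 48M; the assert guards the subtraction
    // against underflow.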

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          // Use SIZE_FORMAT for the size_t arguments; "%d" would be wrong on
          // LP64 platforms.
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
            " young_gen_capacity: " SIZE_FORMAT
            " perm_gen_capacity: " SIZE_FORMAT " ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection. A
        // desired young generation size may have been calculated, but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation. Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here; this is after the GC, so it
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}
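
// Illustrative example for absorb_live_data_from_eden() below (added
// commentary, not part of the original source): with an 8K generation
// alignment, eden_used = 30M, and an average padded promotion of 2M, the
// routine computes absorb_size = align_size_up(30M + 2M, 8K) = 32M. If
// eden's capacity exceeds 32M and the shrunken young gen stays at or above
// its minimum size, the old/young boundary is moved: the old gen grows by
// 32M and the young gen shrinks by the same amount, replacing a copy of
// eden's live data with a simple boundary adjustment.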

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount
  // promoted in the total; otherwise the next young gen GC will simply
  // bail out to a full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the
  // region from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from
  // eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated
  // at full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}
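
// Worked example (added commentary, not part of the original source): the
// unused portion of to_space is reused above as scratch storage for marks
// that phase 1 has to displace. pointer_delta(..., sizeof(jbyte)) yields the
// free span in bytes; with, say, 1M free and a two-word PreservedMark
// (16 bytes on a 64-bit VM), _preserved_count_max = 1M / 16 = 65536 entries.
// Marks beyond that capacity overflow into _preserved_mark_stack and
// _preserved_oop_stack, which deallocate_stacks() below releases.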

void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
  _revisit_klass_stack.clear(true);
  _revisit_mdo_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    ReferenceProcessor::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    vmSymbols::oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can
    // unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}
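
// Added note (commentary, not part of the original source): the marking done
// above is a standard worklist scheme. In rough pseudo-code:
//
//   mark_and_push(obj): if (!obj->is_marked()) { mark(obj); _marking_stack.push(obj); }
//   follow_stack():     while (!_marking_stack.is_empty())
//                         _marking_stack.pop()->follow_contents();
//
// follow_contents() applies mark_and_push to each reference field of the
// popped object, so follow_stack() returns only once the transitive closure
// of reachable objects has been marked.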

void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last, a klassOop may get
  // overwritten. This is fine since that klassOop is dead, but if the class
  // has dead instances we have to skip them, and in order to find their size
  // we need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}
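
// Added note (commentary, not part of the original source): phase 3 visits
// strictly more locations than phase 1 -- every strong root plus the weak
// roots (weak JNI handles, the symbol/string tables, and both reference
// processors' discovered lists). Any slot still holding a pointer to a
// surviving object must be rewritten to the object's new address, whether or
// not that slot contributed to keeping the object alive.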
All 665 // classes must be allocated earlier than their instances, and traversing 666 // perm_gen first makes sure that all klassOops have moved to their new 667 // location before any instance does a dispatch through it's klass! 668 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); 669 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); 670 671 PSYoungGen* young_gen = heap->young_gen(); 672 PSOldGen* old_gen = heap->old_gen(); 673 PSPermGen* perm_gen = heap->perm_gen(); 674 675 perm_gen->compact(); 676 old_gen->compact(); 677 young_gen->compact(); 678 } 679 680 jlong PSMarkSweep::millis_since_last_gc() { 681 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc; 682 // XXX See note in genCollectedHeap::millis_since_last_gc(). 683 if (ret_val < 0) { 684 NOT_PRODUCT(warning("time warp: %d", ret_val);) 685 return 0; 686 } 687 return ret_val; 688 } 689 690 void PSMarkSweep::reset_millis_since_last_gc() { 691 _time_of_last_gc = os::javaTimeMillis(); 692 }