#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)psMarkSweep.cpp 1.92 07/06/08 23:11:01 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psMarkSweep.cpp.incl"

elapsedTimer       PSMarkSweep::_accumulated_time;
unsigned int       PSMarkSweep::_total_invocations = 0;
jlong              PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.  The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    // For a maximally compacting collection, temporarily set
    // MarkSweepAlwaysCompactCount to 1 so every space is fully compacted.
    const int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output
    // the customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT
                              " perm_gen_capacity: " SIZE_FORMAT " ",
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc */,
                                 gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
                                 "of " UINTX_FORMAT "%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
                                 "of " UINTX_FORMAT "%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  // Use the unused part of to-space as scratch storage for preserved marks.
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
}


void PSMarkSweep::deallocate_stacks() {
  if (_preserved_oop_stack) {
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  delete _marking_stack;
  delete _revisit_klass_stack;
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  Universe::oops_do(mark_and_push_closure());
  ReferenceProcessor::oops_do(mark_and_push_closure());
  JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
  Threads::oops_do(mark_and_push_closure());
  ObjectSynchronizer::oops_do(mark_and_push_closure());
  FlatProfiler::oops_do(mark_and_push_closure());
  Management::oops_do(mark_and_push_closure());
  JvmtiExport::oops_do(mark_and_push_closure());
  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
  vmSymbols::oops_do(mark_and_push_closure());

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(_marking_stack->is_empty(), "stack should be empty by now");
}


void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());  // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure());
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4.  All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}