/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
unsigned int        PSMarkSweep::_total_invocations = 0;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}
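// Note that the reference processor is created over the heap's entire
// reserved region: a serial full collection may discover reference objects
// anywhere in the heap, young, old or perm.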
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy may be true
// because this method can be called without intervening activity.  For
// example, when the heap space is tight and full measures are being taken to
// free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  int count = (maximum_heap_compaction) ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
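// Note on the flag-setting idiom above: MarkSweepAlwaysCompactCount (whose
// default has historically been 4) makes every Nth mark-sweep compact the
// heap completely, while the other invocations may tolerate some dead space
// for speed.  Temporarily forcing it to 1 therefore makes this single
// invocation compact fully; IntFlagSetting restores the previous value when
// invoke() returns.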
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start(os::elapsed_counter());
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify();
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);
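    // The collection proper runs as four serial phases (see the phase
    // methods below): phase 1 marks all live objects, phase 2 computes
    // their new addresses, phase 3 adjusts roots and interior pointers to
    // those addresses, and phase 4 actually moves the objects.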
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to the soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }
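    // Rationale for the clear/invalidate choice above: when the young gen is
    // empty there can be no old->young pointers, so every card covering the
    // old and perm gens can safely be marked clean.  Otherwise objects have
    // moved and any card might now cover an old->young pointer, so all cards
    // are dirtied and left for the next scavenge to re-examine.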
    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          // The capacities are size_t values; use SIZE_FORMAT to print them
          // portably on both 32-bit and 64-bit platforms.
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT
                              " perm_gen_capacity: " SIZE_FORMAT " ",
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify();
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end(os::elapsed_counter());

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}
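// Absorb eden's live data into the old gen by moving the boundary between
// the two generations, leaving the young gen empty after a full collection.
// In outline (all of it implemented below): fill the unused tail of the old
// gen with filler objects, expand the old gen's virtual space into the young
// gen's, point the old gen's top/end at eden's top, and update the object
// start array for the absorbed region.  Returns false whenever the boundary
// cannot or should not be moved.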
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();
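  // A worked example with hypothetical numbers: if eden holds 6M of live
  // data, the padded average promoted per scavenge is 1.5M and the alignment
  // is 1M, then absorb_size = align_size_up(6M + 1.5M, 1M) = 8M.  The checks
  // below give up if those 8M meet or exceed eden's capacity, or if
  // shrinking the young gen by 8M would take it below its minimum size.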
  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}
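// A note on the sizing arithmetic above: pointer_delta() with a unit of
// sizeof(jbyte) yields the free space in to_space in bytes, and dividing by
// sizeof(PreservedMark) converts that byte count into a capacity in entries.
// Marks that do not fit in this in-place buffer spill into the growable
// _preserved_mark_stack/_preserved_oop_stack (cleared in deallocate_stacks()
// below) in the shared mark-sweep code.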
void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
  _revisit_klass_stack.clear(true);
  _revisit_mdo_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST.  If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array.  If perm_gen is not traversed last a klassOop may get
  // overwritten.  This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so.  See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;
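// always_true is deliberately unconditional: by the time phase 3 walks the
// weak JNI handles, reference processing in phase 1 has already cleared any
// handle whose referent did not survive, so every remaining entry can be
// treated as live and merely needs its pointer adjusted.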
void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4.  All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms, but
  // os::javaTimeMillis() does not guarantee monotonicity, so use
  // os::javaTimeNanos() (which is monotonic) converted to milliseconds.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms, but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}