/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

elapsedTimer       PSMarkSweep::_accumulated_time;
jlong              PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before the full gc, or perform any other specialized
// behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
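  // Filling this gap with dummy objects keeps the old gen parseable as a
  // contiguous sequence of objects once its top is advanced to eden's top below.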
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

// This should be moved to the shared markSweep code!
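// A closure that reports every object as live. It is passed to
// JNIHandles::weak_oops_do() in phase 3 so that all remaining weak JNI
// handles are adjusted; handles to non-surviving objects should already
// have been cleared by that point.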
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}