/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}
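
// Note: initialize() runs once during heap setup, before the first
// collection. The reference processor spans the entire reserved region;
// "vanilla" here means the ReferenceProcessor defaults apply (presumably
// single-threaded discovery and processing).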

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example, when heap space is tight and full measures are
// being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);
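
  // The collection proper happens inside the scope below; TraceCPUTime,
  // GCTraceTime and the stats objects report from their destructors, so
  // timing output should be complete before the post-GC verification steps.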

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }
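
    // At this point eden_empty is true either because eden was empty after
    // the compaction, or because its remaining live data was absorbed into
    // the old gen by moving the generation boundary.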

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
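
        // The resize calls at the end of this block consume the sizes
        // computed by compute_generations_free_space(); explicit GCs only
        // get this far when UseAdaptiveSizePolicyWithSystemGC is set
        // (checked above).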

        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();
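
    // The usage deltas printed below compare against the values captured
    // before the collection (prev_used, old_gen_prev_used,
    // young_gen_prev_used and metadata_prev_used).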

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }
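
  // The region between the old gen's current top and its end must remain
  // parseable by heap walkers once the boundary moves, so it is plugged
  // with filler objects below.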

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}
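
// Note: preserved marks are staged in the then-unused to_space (see
// allocate_stacks() above); if more than _preserved_count_max marks are
// needed, MarkSweep is expected to fall back to the growable stacks
// released in deallocate_stacks().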

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so.  See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}
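
// Phases 3 and 4 complete the mark-compact sequence: phase 3 rewrites all
// references to the forwarding addresses computed in phase 2, and phase 4
// slides the objects themselves to those addresses.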

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());  // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}