/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer       PSMarkSweep::_accumulated_time;
jlong              PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
  set_ref_processor(new ReferenceProcessor(mr));     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);

  // Dummy counter
  new CollectorCounters("dummy", 2);
}
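// PSMarkSweep implements a single-threaded, four-phase mark-compact full
// collection (see mark_sweep_phase1() through mark_sweep_phase4() below):
//   1. Mark all live objects reachable from the strong roots.
//   2. Compute the post-compaction address of every live object.
//   3. Adjust all pointers to refer to the new addresses.
//   4. Move (compact) the objects to their new locations.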
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();
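    // The previous-used values recorded below are reported after the
    // collection via the print_used_change()/print_metaspace_change()
    // calls near the end of this scope.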
    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceAux::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}
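// If the generation boundary is allowed to move (UseAdaptiveSizePolicy and
// UseAdaptiveGCBoundary), try to expand the old gen so that it absorbs
// eden's live data in place instead of copying it. On success the caller
// can treat eden as empty for this collection. Returns false when the
// boundary cannot or should not be moved.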
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false; // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}
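// The preserved-mark array lives in the unused portion of to_space for the
// duration of the collection; marks that do not fit there overflow into
// _preserved_mark_stack/_preserved_oop_stack, which are released again in
// deallocate_stacks().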
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(is_alive_closure());
  }

  {
    GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
    // Delete entries for dead interned strings.
    StringTable::unlink(is_alive_closure());
  }
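  // Unlike the string table, the symbol table is reference-counted, so
  // SymbolTable::unlink() can drop dead entries without an is_alive closure.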
  {
    GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
    // Clean up unreferenced symbols in symbol table.
    SymbolTable::unlink();
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}
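// Time bookkeeping: _time_of_last_gc is refreshed by
// reset_millis_since_last_gc() at the end of every full collection (see
// invoke_no_policy() above), so millis_since_last_gc() reports the time
// since this collector's last full GC.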
jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}