/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer       PSMarkSweep::_accumulated_time;
jlong              PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;

void PSMarkSweep::initialize() {
  _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
  set_ref_processor(new ReferenceProcessor(&_span_based_discoverer));  // a vanilla ref proc
  _counters = new CollectorCounters("Serial full collection pauses", 1);
  MarkSweep::initialize();
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further.
// If we need to bail out for policy reasons, scavenge before full gc, or
// perform any other specialized behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->soft_ref_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
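  // (Used below to decide whether the old gen's card table can simply be
  // cleared, when the young gen ends up empty, or must be invalidated.)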
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    BiasedLocking::preserve_marks();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceUtils::used_bytes();

    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

#if COMPILER2_OR_JVMCI
    // Don't add any more derived pointers during phase3
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    PSCardTable* card_table = heap->card_table();
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceUtils::verify_metrics();

    BiasedLocking::restore_marks();
    heap->prune_scavengable_nmethods();
    JvmtiExport::gc_epilogue();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    assert(!ref_processor()->discovery_enabled(), "Should have been disabled earlier");

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out of bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc */);

        size_policy->check_gc_overhead_limit(eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc */,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc */);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    young_gen->print_used_change(young_gen_prev_used);
    old_gen->print_used_change(old_gen_prev_used);
    MetaspaceUtils::print_metaspace_change(metadata_prev_used);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    heap->post_full_gc_dump(_gc_timer);
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  log_trace(gc, ergo, heap)(" absorbing " SIZE_FORMAT "K: "
                            "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                            "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                            "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                            absorb_size / K,
                            eden_capacity / K, (eden_capacity - absorb_size) / K,
                            young_gen->from_space()->used_in_bytes() / K,
                            young_gen->to_space()->used_in_bytes() / K,
                            young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
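  // (The start array is what lets card scanning find the first object in a
  // block of the old gen, so every object now covered by the old gen,
  // including the filler, needs an entry.)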
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}

// Release the per-collection marking stacks; the preserved-mark and objarray
// stacks free their backing memory as well (clear(true)).
void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
    AOTLoader::oops_do(mark_and_push_closure());
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

    ref_processor()->setup_policy(clear_all_softrefs);
    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
    gc_tracer()->report_gc_reference_stats(stats);
    pt.print_all_references();
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  {
    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
  }

  {
    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);

    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(_gc_timer);

    // Unload nmethods.
    CodeCache::do_unloading(is_alive_closure(), purged_class);

    // Prune dead klasses from subklass/sibling/implementor lists.
    Klass::clean_weak_klass_links(purged_class);
  }

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}

void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());  // Global (strong) JNI handles
  Threads::oops_do(adjust_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
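  // Surviving weak referents still hold their pre-compaction addresses, so
  // these roots need the same adjustment as the strong roots above.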
  // Global (weak) JNI handles
  WeakProcessor::oops_do(adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  AOTLoader::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}