/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                     PSScavenge::_to_space_top_before_gc = NULL;
int                           PSScavenge::_consecutive_skipped_scavenges = 0;
SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = NULL;
PSCardTable*                  PSScavenge::_card_table = NULL;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = NULL;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = NULL;

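// Scavenge one category of strong roots on behalf of a GC worker: the
// worker's promotion manager copies any young objects reachable from the
// claimed roots, then drains its local work stacks.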
void scavenge_roots_task(Parallel::RootType::Value root_type, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
  PSScavengeRootsClosure roots_closure(pm);
  PSPromoteRootsClosure  roots_to_old_closure(pm);

  switch (root_type) {
    case Parallel::RootType::universe:
      Universe::oops_do(&roots_closure);
      break;

    case Parallel::RootType::jni_handles:
      JNIHandles::oops_do(&roots_closure);
      break;

    case Parallel::RootType::object_synchronizer:
      ObjectSynchronizer::oops_do(&roots_closure);
      break;

    case Parallel::RootType::system_dictionary:
      SystemDictionary::oops_do(&roots_closure);
      break;

    case Parallel::RootType::class_loader_data:
      {
        PSScavengeCLDClosure cld_closure(pm);
        ClassLoaderDataGraph::cld_do(&cld_closure);
      }
      break;

    case Parallel::RootType::management:
      Management::oops_do(&roots_closure);
      break;

    case Parallel::RootType::jvmti:
      JvmtiExport::oops_do(&roots_closure);
      break;

    case Parallel::RootType::code_cache:
      {
        MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
        ScavengableNMethods::nmethods_do(&code_closure);
        AOTLoader::oops_do(&roots_closure);
      }
      break;

#if INCLUDE_JVMCI
    case Parallel::RootType::jvmci:
      JVMCI::oops_do(&roots_closure);
      break;
#endif

    case Parallel::RootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

void steal_task(ParallelTaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    StarTask p;
    if (PSPromotionManager::steal_depth(worker_id, p)) {
      TASKQUEUE_STATS_ONLY(pm->record_steal(p));
      pm->process_popped_location_depth(p);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

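// Applied by reference processing to referents that must be kept alive:
// a referent still sitting in young space is copied (scavenged) through
// the promotion manager just like a regular root, rather than merely
// marked.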
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert(oopDesc::is_oop(RawAccess<IS_NOT_NULL>::oop_load(p)),
           "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task, uint ergo_workers);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  uint active_workers = manager->active_workers();

  assert(active_workers == ergo_workers,
         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
         ergo_workers, active_workers);

  for (uint i = 0; i < active_workers; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  TaskTerminator terminator(active_workers,
                            (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && active_workers > 1) {
    for (uint j = 0; j < active_workers; j++) {
      q->enqueue(new StealTask(terminator.terminator()));
    }
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
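//
// In outline: run the scavenge first, then trigger a full GC in the same
// safepoint if the scavenge could not run or did not complete, or if the
// size policy judges that the padded average promotion no longer fits in
// the old gen's free space.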
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    SoftRefPolicy* srp = heap->soft_ref_policy();
    const bool clear_all_softrefs = srp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

class PSThreadRootsTaskClosure : public ThreadClosure {
  uint _worker_id;
public:
  PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
    PSScavengeRootsClosure roots_closure(pm);
    MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);

    thread->oops_do(&roots_closure, &roots_in_blobs);

    // Do the real work
    pm->drain_stacks(false);
  }
};

//
// OldToYoungRootsTask
//
// This task is used to scan old to young roots in parallel
//
// A GC thread executing this task divides the generation (old gen)
// into slices and takes a stripe in the slice as its part of the
// work.
//
//      +===============+        slice 0
//      |  stripe 0     |
//      +---------------+
//      |  stripe 1     |
//      +---------------+
//      |  stripe 2     |
//      +---------------+
//      |  stripe 3     |
//      +===============+        slice 1
//      |  stripe 0     |
//      +---------------+
//      |  stripe 1     |
//      +---------------+
//      |  stripe 2     |
//      +---------------+
//      |  stripe 3     |
//      +===============+        slice 2
//      ...
//
// A task is created for each stripe.  In this case there are 4 tasks
// created.  A GC thread first works on its stripe within slice 0
// and then moves to its stripe in the next slice until all stripes
// exceed the top of the generation.  Note that having fewer GC threads
// than stripes works because all the tasks are executed so all stripes
// will be covered.  In this example if 4 tasks have been created to cover
// all the stripes and there are only 3 threads, one of the threads will
// get the task with the 4th stripe.  However, there is a dependence in
// PSCardTable::scavenge_contents_parallel() on the number
// of tasks created.  In scavenge_contents_parallel the distance
// to the next stripe is calculated based on the number of tasks.
// If the stripe width is ssize, a task's next stripe is at
// ssize * number_of_tasks (= slice_stride).  In this case after
// finishing stripe 0 in slice 0, the thread finds stripe 0 in slice 1
// by adding slice_stride to the start of stripe 0 in slice 0 to get
// to the start of stripe 0 in slice 1.
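//
// Worked example (illustrative numbers only): with 4 stripes per slice
// and a stripe width of ssize = 128 words, slice_stride = 128 * 4 = 512
// words.  A thread assigned stripe 2 scans [bottom + 256, bottom + 384),
// then jumps slice_stride ahead to [bottom + 768, bottom + 896), which
// is stripe 2 of slice 1, and so on until it passes the top of the gen.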

class ScavengeRootsTask : public AbstractGangTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  EnumClaimer<Parallel::RootType::Value> _enum_claimer;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(
    PSOldGen* old_gen,
    HeapWord* gen_top,
    uint active_workers,
    bool is_empty)
    : AbstractGangTask("ScavengeRootsTask"),
      _strong_roots_scope(active_workers),
      _enum_claimer(Parallel::RootType::sentinel),
      _old_gen(old_gen),
      _gen_top(gen_top),
      _active_workers(active_workers),
      _is_empty(is_empty),
      _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;

    if (!_is_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.

      // There are no old-to-young pointers if the old gen is empty.
      assert(!_old_gen->object_space()->is_empty(),
             "Should not be called if there is no work");
      assert(_old_gen != NULL, "Sanity");
      assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
      assert(worker_id < ParallelGCThreads, "Sanity");

      {
        PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (Parallel::RootType::Value root_type; _enum_claimer.try_claim(root_type); /* empty */) {
      scavenge_roots_task(root_type, worker_id);
    }

    PSThreadRootsTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(true /* parallel */, &closure);

    // If active_workers can exceed 1, add a StealTask.
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and expects a StealTask to complete the draining if
    // ParallelGCThreads is > 1.
    if (_active_workers > 1) {
      steal_task(*_terminator.terminator(), worker_id);
    }
  }
};

// This method contains no policy. You should probably
// be calling invoke() instead.
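//
// The scavenge proper, in order: scan strong roots and old-to-young
// card-table roots while copying reachable young objects
// (ScavengeRootsTask), process discovered References, process other weak
// roots, then flush the promotion managers and, on success, swap the
// survivor spaces and let the adaptive size policy resize the young gen.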
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause);

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      heap->card_table()->verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    PreGCValues pre_gc_values(heap);

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    uint active_workers = ParallelScavengeHeap::heap()->workers().update_active_workers(WorkerPolicy::calc_active_workers(
      ParallelScavengeHeap::heap()->workers().total_workers(),
      ParallelScavengeHeap::heap()->workers().active_workers(),
      Threads::number_of_non_daemon_threads()));

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();

    assert(active_workers == gc_task_manager()->active_workers(),
           "sanity, taskmanager and workgang ought to agree");

    PSPromotionManager::pre_scavenge();
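
    // Each GC worker copies through its own promotion manager (see
    // gc_thread_promotion_manager(worker_id) above); the VM thread's
    // dedicated manager, fetched below, serves the serial parts of
    // reference and weak processing.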
    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, old_top, active_workers, old_gen->object_space()->is_empty());
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &pt);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &pt);
      }

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    assert(promotion_manager->stacks_empty(), "stacks should be empty at this point");

    PSScavengeRootsClosure root_closure(promotion_manager);

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      WeakProcessor::weak_oops_do(&_is_alive_closure, &root_closure);
    }

    // Verify that usage of root_closure didn't copy any objects.
    assert(promotion_manager->stacks_empty(), "stacks should be empty at this point");

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);
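      // Illustrative accounting (made-up numbers): if from_space holds
      // 10 MB after the swap and old gen usage grew from 100 MB to
      // 112 MB, then survived = 10 MB and promoted = 12 MB are fed into
      // the policy's decaying averages.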

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
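          // Illustrative numbers: with max_young_size = 64 MB and two
          // 8 MB survivor spaces, max_eden_size = 64 - 8 - 8 = 48 MB;
          // the policy may grow eden up to that bound.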

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // heap->card_table()->verify_all_young_refs_precise();
      heap->card_table()->verify_all_young_refs_imprecise();
    }

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    young_gen->print_used_change(pre_gc_values.young_gen_used());
    old_gen->print_used_change(pre_gc_values.old_gen_used());
    MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                            scavenge_exit.ticks());
  gc_task_manager()->print_task_time_stamps();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// removing all forwarding references. It then restores any preserved marks.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  RemoveForwardedPointerClosure remove_fwd_ptr_closure;
  young_gen->object_iterate(&remove_fwd_ptr_closure);

  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Do not attempt to promote unless to_space is empty
  if (!young_gen->to_space()->is_empty()) {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(to_space_not_empty);
    }
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
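  // Illustrative numbers: if the padded average promotion is 8 MB but the
  // young gen only holds 6 MB, promotion_estimate = 6 MB, and the scavenge
  // is attempted only when the old gen has more than 6 MB free.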
"Do" : "Skip", (size_t) policy->average_promoted_in_bytes(), 878 (size_t) policy->padded_average_promoted_in_bytes(), 879 old_gen->free_in_bytes()); 880 if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) { 881 log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()); 882 } 883 884 if (result) { 885 _consecutive_skipped_scavenges = 0; 886 } else { 887 _consecutive_skipped_scavenges++; 888 if (UsePerfData) { 889 counters->update_scavenge_skipped(promoted_too_large); 890 } 891 } 892 return result; 893 } 894 895 // Used to add tasks 896 GCTaskManager* const PSScavenge::gc_task_manager() { 897 assert(ParallelScavengeHeap::gc_task_manager() != NULL, 898 "shouldn't return NULL"); 899 return ParallelScavengeHeap::gc_task_manager(); 900 } 901 902 // Adaptive size policy support. When the young generation/old generation 903 // boundary moves, _young_generation_boundary must be reset 904 void PSScavenge::set_young_generation_boundary(HeapWord* v) { 905 _young_generation_boundary = v; 906 if (UseCompressedOops) { 907 _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v); 908 } 909 } 910 911 void PSScavenge::initialize() { 912 // Arguments must have been parsed 913 914 if (AlwaysTenure || NeverTenure) { 915 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1, 916 "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold); 917 _tenuring_threshold = MaxTenuringThreshold; 918 } else { 919 // We want to smooth out our startup times for the AdaptiveSizePolicy 920 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold : 921 MaxTenuringThreshold; 922 } 923 924 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); 925 PSYoungGen* young_gen = heap->young_gen(); 926 PSOldGen* old_gen = heap->old_gen(); 927 928 // Set boundary between young_gen and old_gen 929 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), 930 "old above young"); 931 set_young_generation_boundary(young_gen->eden_space()->bottom()); 932 933 // Initialize ref handling object for scavenging. 934 _span_based_discoverer.set_span(young_gen->reserved()); 935 _ref_processor = 936 new ReferenceProcessor(&_span_based_discoverer, 937 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 938 ParallelGCThreads, // mt processing degree 939 true, // mt discovery 940 ParallelGCThreads, // mt discovery degree 941 true, // atomic_discovery 942 NULL, // header provides liveness info 943 false); 944 945 // Cache the cardtable 946 _card_table = heap->card_table(); 947 948 _counters = new CollectorCounters("Parallel young collection pauses", 0); 949 }