/*
 * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "logging/log.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                     PSScavenge::_to_space_top_before_gc = NULL;
int                           PSScavenge::_consecutive_skipped_scavenges = 0;
SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = NULL;
PSCardTable*                  PSScavenge::_card_table = NULL;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = NULL;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = NULL;
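
// Process one category of strong roots with this worker's promotion manager,
// then drain the worker's local promotion stacks. Called for each claimed
// subtask in ScavengeRootsTask::work() below.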
static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSScavengeRootsClosure roots_closure(pm);
  PSPromoteRootsClosure  roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::universe:
      Universe::oops_do(&roots_closure);
      break;

    case ParallelRootType::jni_handles:
      JNIHandles::oops_do(&roots_closure);
      break;

    case ParallelRootType::object_synchronizer:
      ObjectSynchronizer::oops_do(&roots_closure);
      break;

    case ParallelRootType::system_dictionary:
      SystemDictionary::oops_do(&roots_closure);
      break;

    case ParallelRootType::class_loader_data:
      {
        PSScavengeCLDClosure cld_closure(pm);
        ClassLoaderDataGraph::cld_do(&cld_closure);
      }
      break;

    case ParallelRootType::management:
      Management::oops_do(&roots_closure);
      break;

    case ParallelRootType::jvmti:
      JvmtiExport::oops_do(&roots_closure);
      break;

    case ParallelRootType::code_cache:
      {
        MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
        ScavengableNMethods::nmethods_do(&code_closure);
        AOTLoader::oops_do(&roots_closure);
      }
      break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create a compile error on release builds (-Wswitch) and a runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

static void steal_work(ParallelTaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    StarTask p;
    if (PSPromotionManager::steal_depth(worker_id, p)) {
      TASKQUEUE_STATS_ONLY(pm->record_steal(p));
      pm->process_popped_location_depth(p);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert(oopDesc::is_oop(RawAccess<IS_NOT_NULL>::oop_load(p)),
           "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};
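
// Drains the promotion manager's stacks until they are empty; passed as the
// complete_gc closure during reference processing below.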
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& process_task, uint ergo_workers);
};

class PSRefProcTask : public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  TaskTerminator _terminator;
  ProcessTask& _task;
  uint _active_workers;

public:
  PSRefProcTask(ProcessTask& task, uint active_workers)
    : AbstractGangTask("PSRefProcTask"),
      _terminator(active_workers, PSPromotionManager::stack_array_depth()),
      _task(task),
      _active_workers(active_workers) {
  }

  virtual void work(uint worker_id) {
    PSPromotionManager* promotion_manager =
      PSPromotionManager::gc_thread_promotion_manager(worker_id);
    assert(promotion_manager != NULL, "sanity check");
    PSKeepAliveClosure keep_alive(promotion_manager);
    PSEvacuateFollowersClosure evac_followers(promotion_manager);
    PSIsAliveClosure is_alive;
    _task.work(worker_id, is_alive, keep_alive, evac_followers);

    if (_task.marks_oops_alive() && _active_workers > 1) {
      steal_work(*_terminator.terminator(), worker_id);
    }
  }
};

void PSRefProcTaskExecutor::execute(ProcessTask& process_task, uint ergo_workers) {
  PSRefProcTask task(process_task, ergo_workers);
  ParallelScavengeHeap::heap()->workers().run_task(&task);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    SoftRefPolicy* srp = heap->soft_ref_policy();
    const bool clear_all_softrefs = srp->should_clear_all_soft_refs();

    full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
  }

  return full_gc_done;
}
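
// Scans a single mutator thread's stack, including the code blobs it
// references, then drains the worker's promotion stacks.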
class PSThreadRootsTaskClosure : public ThreadClosure {
  uint _worker_id;
public:
  PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
    PSScavengeRootsClosure roots_closure(pm);
    MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);

    thread->oops_do(&roots_closure, &roots_in_blobs);

    // Do the real work
    pm->drain_stacks(false);
  }
};

class ScavengeRootsTask : public AbstractGangTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    HeapWord* gen_top,
                    uint active_workers,
                    bool is_empty) :
      AbstractGangTask("ScavengeRootsTask"),
      _strong_roots_scope(active_workers),
      _subtasks(),
      _old_gen(old_gen),
      _gen_top(gen_top),
      _active_workers(active_workers),
      _is_empty(is_empty),
      _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
    _subtasks.set_n_threads(active_workers);
    _subtasks.set_n_tasks(ParallelRootType::sentinel);
  }

  virtual void work(uint worker_id) {
    ResourceMark rm;

    if (!_is_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.

      assert(_old_gen != NULL, "Sanity");
      // There are no old-to-young pointers if the old gen is empty.
      assert(!_old_gen->object_space()->is_empty(), "Should not be called if there is no work");
      assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
      assert(worker_id < ParallelGCThreads, "Sanity");

      {
        PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }
    _subtasks.all_tasks_completed();

    PSThreadRootsTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(true /* parallel */, &closure);

    // If active_workers can exceed 1, add a steal_work().
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and expects a steal_work() to complete the draining if
    // ParallelGCThreads is > 1.

    if (_active_workers > 1) {
      steal_work(*_terminator.terminator(), worker_id);
    }
  }
};
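
// The scavenge proper: evacuate live young objects by copying survivors to
// to-space or promoting them to the old gen, process discovered references
// and weak roots, then let the adaptive size policy resize the young
// generation.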
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true); // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm; // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause);

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      heap->card_table()->verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().update_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, old_top, active_workers, old_gen->object_space()->is_empty());
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge.
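    // _is_alive_closure tests liveness, keep_alive copies reachable referents
    // into to-space, and evac_followers drains the promotion stacks after
    // each processing phase.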
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &pt);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &pt);
      }

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    assert(promotion_manager->stacks_empty(), "stacks should be empty at this point");

    PSScavengeRootsClosure root_closure(promotion_manager);

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      WeakProcessor::weak_oops_do(&_is_alive_closure, &root_closure);
    }

    // Verify that usage of root_closure didn't copy any objects.
    assert(promotion_manager->stacks_empty(), "stacks should be empty at this point");

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count, which is
      // for full GCs.
      size_policy->reset_gc_overhead_limit_count();
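
      // Under UseAdaptiveSizePolicy, recompute the survivor space size and
      // tenuring threshold from this collection's survival and promotion
      // statistics, then resize the young generation to match.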
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout; make sure eden is reshaped if
      // that's the case. update() also performs adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // heap->card_table()->verify_all_young_refs_precise();
      heap->card_table()->verify_all_young_refs_imprecise();
    }

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm; // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                            scavenge_exit.ticks());

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// removing all forwarding references. It then restores any preserved marks.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  RemoveForwardedPointerClosure remove_fwd_ptr_closure;
  young_gen->object_iterate(&remove_fwd_ptr_closure);

  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Do not attempt to promote unless to_space is empty
  if (!young_gen->to_space()->is_empty()) {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(to_space_not_empty);
    }
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT,
                  result ? "Do" : "Skip",
                  (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  old_gen->free_in_bytes());
  if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
    log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Adaptive size policy support. When the young generation/old generation
// boundary moves, _young_generation_boundary must be reset.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
  }
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           ParallelGCThreads,   // mt processing degree
                           true,                // mt discovery
                           ParallelGCThreads,   // mt discovery degree
                           true,                // atomic_discovery
                           NULL,                // header provides liveness info
                           false);

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}