/*
 * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psMarkSweepProxy.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                     PSScavenge::_to_space_top_before_gc = NULL;
int                           PSScavenge::_consecutive_skipped_scavenges = 0;
SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = NULL;
PSCardTable*                  PSScavenge::_card_table = NULL;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = NULL;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

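// Closure applied by reference processing to each referent that must be
// kept alive: the referent is scavenged just like a strong root. Weak refs
// may reach the same object through more than one path, so should_scavenge()
// filters out objects that have already been copied to to_space or promoted.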
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert(oopDesc::is_oop(RawAccess<IS_NOT_NULL>::oop_load(p)),
           "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask& rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task, uint ergo_workers);
};

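// Fans a ProcessTask out to the GC task manager: one PSRefProcTaskProxy per
// active worker, plus StealTasks for termination when the task can mark
// additional oops alive (marks_oops_alive()).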
void PSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  uint active_workers = manager->active_workers();

  assert(active_workers == ergo_workers,
         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
         ergo_workers, active_workers);

  for (uint i = 0; i < active_workers; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(active_workers,
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && active_workers > 1) {
    for (uint j = 0; j < active_workers; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    SoftRefPolicy* srp = heap->soft_ref_policy();
    const bool clear_all_softrefs = srp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

class PSAddThreadRootsTaskClosure : public ThreadClosure {
private:
  GCTaskQueue* _q;

public:
  PSAddThreadRootsTaskClosure(GCTaskQueue* q) : _q(q) { }
  void do_thread(Thread* t) {
    _q->enqueue(new ThreadRootsTask(t));
  }
};

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

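  // The collection proper. Old->young card stripes and strong roots are
  // scavenged in parallel, followers are evacuated with work stealing,
  // discovered references and weak oops are processed, and the results
  // feed the adaptive size policy below.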
  {
    ResourceMark rm;
    HandleMark hm;

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause);

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      heap->card_table()->verify_all_young_refs_imprecise();
    }

    assert(young_gen->to_space()->is_empty(),
           "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear(SpaceDecorator::Mangle);

    save_to_space_top_before_gc();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    PreGCValues pre_gc_values(heap);

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the method.
    uint active_workers = gc_task_manager()->active_workers();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

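      // Each strong-root category gets its own GCTask so the categories can
      // be scanned in parallel with each other and with the old->young card
      // stripes enqueued above.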
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      PSAddThreadRootsTaskClosure cl(q);
      Threads::java_threads_and_vm_thread_do(&cl);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(active_workers,
                                        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      // If active_workers can exceed 1, add a StealTask.
      // PSPromotionManager::drain_stacks_depth() does not fully drain its
      // stacks and expects a StealTask to complete the draining if
      // ParallelGCThreads is > 1.
      if (gc_task_manager()->workers() > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &pt);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &pt);
      }

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    assert(promotion_manager->stacks_empty(),
           "stacks should be empty at this point");

    PSScavengeRootsClosure root_closure(promotion_manager);

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      WeakProcessor::weak_oops_do(&_is_alive_closure, &root_closure);
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Verify that usage of root_closure didn't copy any objects.
    assert(promotion_manager->stacks_empty(),
           "stacks should be empty at this point");

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

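    // On a successful scavenge the survivor spaces are swapped and the
    // survived/promoted byte counts drive the adaptive size policy's
    // recalculation of eden, survivor sizes, and the tenuring threshold.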
    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count, which is
      // for full GCs.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have
        // it follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit = size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max threshold " UINTX_FORMAT ")",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

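        // In the block below, max_eden_size is max_young_size minus both
        // survivor spaces: eden plus two survivor spaces must fit within
        // the young generation.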
        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            (AdaptiveSizePolicy::should_update_eden_stats(gc_cause))) {
          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }

        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. update() also causes adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

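    // The DerivedPointerTable was cleared before root scanning; entries
    // recorded during the scavenge are updated now that their base oops
    // may have been copied or promoted.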
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // heap->card_table()->verify_all_young_refs_precise();
      heap->card_table()->verify_all_young_refs_imprecise();
    }

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    young_gen->print_used_change(pre_gc_values.young_gen_used());
    old_gen->print_used_change(pre_gc_values.old_gen_used());
    MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  scavenge_exit.update();

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                            scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                            scavenge_exit.ticks());
  gc_task_manager()->print_task_time_stamps();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// removing all forwarding pointers. It then restores any preserved marks.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  RemoveForwardedPointerClosure remove_fwd_ptr_closure;
  young_gen->object_iterate(&remove_fwd_ptr_closure);

  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

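// Heuristic used below (a sketch, not the policy's full reasoning):
//
//   promotion_estimate = MIN2(padded_avg_promoted, young_gen_used)
//   attempt scavenge  iff  promotion_estimate < old_gen_free
//
// i.e. a scavenge is skipped when the padded average of recently promoted
// bytes would not fit in the old gen's free space.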
"Do" : "Skip", (size_t) policy->average_promoted_in_bytes(), 694 (size_t) policy->padded_average_promoted_in_bytes(), 695 old_gen->free_in_bytes()); 696 if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) { 697 log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()); 698 } 699 700 if (result) { 701 _consecutive_skipped_scavenges = 0; 702 } else { 703 _consecutive_skipped_scavenges++; 704 if (UsePerfData) { 705 counters->update_scavenge_skipped(promoted_too_large); 706 } 707 } 708 return result; 709 } 710 711 // Used to add tasks 712 GCTaskManager* const PSScavenge::gc_task_manager() { 713 assert(ParallelScavengeHeap::gc_task_manager() != NULL, 714 "shouldn't return NULL"); 715 return ParallelScavengeHeap::gc_task_manager(); 716 } 717 718 // Adaptive size policy support. When the young generation/old generation 719 // boundary moves, _young_generation_boundary must be reset 720 void PSScavenge::set_young_generation_boundary(HeapWord* v) { 721 _young_generation_boundary = v; 722 if (UseCompressedOops) { 723 _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v); 724 } 725 } 726 727 void PSScavenge::initialize() { 728 // Arguments must have been parsed 729 730 if (AlwaysTenure || NeverTenure) { 731 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1, 732 "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold); 733 _tenuring_threshold = MaxTenuringThreshold; 734 } else { 735 // We want to smooth out our startup times for the AdaptiveSizePolicy 736 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold : 737 MaxTenuringThreshold; 738 } 739 740 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); 741 PSYoungGen* young_gen = heap->young_gen(); 742 PSOldGen* old_gen = heap->old_gen(); 743 744 // Set boundary between young_gen and old_gen 745 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), 746 "old above young"); 747 set_young_generation_boundary(young_gen->eden_space()->bottom()); 748 749 // Initialize ref handling object for scavenging. 750 _span_based_discoverer.set_span(young_gen->reserved()); 751 _ref_processor = 752 new ReferenceProcessor(&_span_based_discoverer, 753 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 754 ParallelGCThreads, // mt processing degree 755 true, // mt discovery 756 ParallelGCThreads, // mt discovery degree 757 true, // atomic_discovery 758 NULL, // header provides liveness info 759 false); 760 761 // Cache the cardtable 762 _card_table = heap->card_table(); 763 764 _counters = new CollectorCounters("PSScavenge", 0); 765 }