/*
 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"


HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;
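// Closures for the scavenge. For a minor collection only young-gen
// objects can die: anything outside the young generation is treated as
// live, and a young-gen object is live iff this scavenge has already
// forwarded (copied) it.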
// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
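// Executors for multi-threaded reference processing: execute() enqueues
// one proxy task per active GC worker. For a ProcessTask that marks oops
// alive, StealTasks are queued as well so that workers finishing early
// can steal evacuation work until the ParallelTaskTerminator sees all
// queues empty.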
void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap-specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or exhibit any other special
// behavior. All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}
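// The scavenge proper runs in invoke_no_policy() below: parallel root
// scanning (old->young card stripes, VM and thread roots), evacuation
// with work stealing, java.lang.ref reference processing, StringTable
// cleanup, and finally adaptive resizing of the young generation.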
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();
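    // Everything between minor_collection_begin() and
    // minor_collection_end() is charged to the collection, not the
    // mutator, when the size policy updates its cost averages.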
    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();
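      // Old->young roots are found by scanning the old gen's dirty
      // cards in parallel stripes, one disjoint stripe per worker;
      // old_top bounds the scan so it cannot stray into promotion LABs
      // allocated during this collection.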
      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);
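    // If no promotion failed, every live young object is now in
    // to_space or the old gen, so eden and from_space can be cleared
    // and the survivor spaces swapped by exchanging the space labels.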
    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold "
                                 UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }
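        // With survivor sizing done, decide how eden should change:
        // compute_eden_space_size() below weighs recent pause times and
        // throughput against the policy's goals.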
        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped
      // if that's the case. update() will also cause adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif


  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
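// Note: oop_promotion_failed() below fills the preserved mark and oop
// stacks that clean_up_failed_promotion() above drains: marks that
// cannot be recomputed (see must_be_preserved_for_promotion_failure())
// are saved while the scavenge is still running and restored after the
// young gen has been unforwarded.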
// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
           err_msg("MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold));
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());
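  // Because the old gen sits at lower addresses than the young gen,
  // a single compare against this boundary classifies an address as
  // young, which keeps is_obj_in_young() and should_scavenge() cheap.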
  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL,                       // header provides liveness info
                           false);                     // next field updates do not need write barrier

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}