/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
 public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
 protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

 public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
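// Note that invoke_no_policy() returns false both when the scavenge is not
// attempted (GC locker active, or should_attempt_scavenge() declines) and
// when a promotion failure occurred during the scavenge.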
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
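    // The object start array records where objects begin within each card of
    // the old gen; card scanning relies on it to find the first object in a
    // dirty card, so verify it before the collection when requested.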
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
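        // The dirty-card scan of the old gen is split into stripes: each
        // OldToYoungRootsTask below handles every stripe_total-th stripe,
        // spreading the card-scanning work across the active workers.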
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer, _gc_tracer.gc_id());
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
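      // Eden and from-space have been fully evacuated, so clear them (mangling
      // if requested) and swap from/to: the surviving objects now sit in what
      // becomes the new from-space.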
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u"
                                 " (max threshold " UINTX_FORMAT ")",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
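        // When enabled (and this is not an unadjusted System.gc()), recompute
        // the eden size and check the GC overhead limit using the live and
        // capacity figures gathered above.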
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // would be confusing.
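        // Report only the usage deltas: the young gen change here, the
        // whole-heap change just below.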
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif


  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
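    // ThreadCritical serializes the GC workers that push onto the shared
    // preserved-oop/preserved-mark stacks.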
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
           err_msg("MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold));
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
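  // The processor's span is the young gen reservation, so only references
  // whose referents live in the young gen are discovered during a scavenge.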
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL);                      // header provides liveness info

  // Cache the cardtable
  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());

  _counters = new CollectorCounters("PSScavenge", 0);
}