/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
CardTableExtension* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
Stack<markOop, mtGC> PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC> PSScavenge::_preserved_oop_stack;
CollectorCounters* PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;
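  // The promotion manager does the actual copying; to_space is cached so that
  // do_oop_work() can ask should_scavenge(p, _to_space) whether a referent still
  // needs to be scavenged (weak refs may be visited more than once, see below).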
public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask& rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
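    // A ProcessTask that marks referents alive pushes newly copied objects onto
    // the per-worker depth-first stacks, so add one StealTask per worker
    // (mirroring the main scavenge below) to let idle workers steal that work
    // and agree on termination.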
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap-specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or exhibit any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Check for potential problems.
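  // should_attempt_scavenge() (defined below) skips the scavenge when to_space is
  // not empty or when the padded average promotion volume is unlikely to fit in
  // the old gen's free space; in that case invoke() falls back to a full GC.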
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceYoungGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
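    // (The saved top is handed to the OldToYoungRootsTask stripes below, so that
    // card scanning of the old gen stops short of objects promoted during this
    // collection.)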
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id());
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for (uint i = 0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id());

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer, _gc_tracer.gc_id());
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id());
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
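      // (Enqueueing hands the discovered, non-live references over to the
      // Java-level pending list so that the reference handler thread can
      // process them after the collection.)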
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id());
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count, which is
      // for full GCs.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
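        // For example (illustrative numbers only): with NewRatio == 2, an old
        // gen capacity of 2G and a young gen maximum of 1.5G, the cap below
        // becomes MIN2(2G / 2, 1.5G) = 1G.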
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u"
                                 " (max threshold " UINTX_FORMAT ")",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can cause the heap layout to change. Make sure eden is
      // reshaped if that's the case. Also, update() will cause adaptive NUMA
      // chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id());

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceYoungGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC, so it
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
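    // Only headers that must_be_preserved_for_promotion_failure() were pushed
    // on these stacks (see oop_promotion_failed() below); every other forwarded
    // header was simply reset to the default mark by the closure above.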
    while (!_preserved_oop_stack.is_empty()) {
      oop obj = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(heap->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();
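  // Illustrative numbers (not taken from any particular run): if the padded
  // average promoted is 48M and the young gen currently holds 32M, then
  // promotion_estimate = MIN2(48M, 32M) = 32M, and the scavenge is attempted
  // only when the old gen has more than 32M free.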
" do scavenge: " : " skip scavenge: "); 789 gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT 790 " padded_average_promoted " SIZE_FORMAT 791 " free in old gen " SIZE_FORMAT, 792 (size_t) policy->average_promoted_in_bytes(), 793 (size_t) policy->padded_average_promoted_in_bytes(), 794 old_gen->free_in_bytes()); 795 if (young_gen->used_in_bytes() < 796 (size_t) policy->padded_average_promoted_in_bytes()) { 797 gclog_or_tty->print_cr(" padded_promoted_average is greater" 798 " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()); 799 } 800 } 801 802 if (result) { 803 _consecutive_skipped_scavenges = 0; 804 } else { 805 _consecutive_skipped_scavenges++; 806 if (UsePerfData) { 807 counters->update_scavenge_skipped(promoted_too_large); 808 } 809 } 810 return result; 811 } 812 813 // Used to add tasks 814 GCTaskManager* const PSScavenge::gc_task_manager() { 815 assert(ParallelScavengeHeap::gc_task_manager() != NULL, 816 "shouldn't return NULL"); 817 return ParallelScavengeHeap::gc_task_manager(); 818 } 819 820 void PSScavenge::initialize() { 821 // Arguments must have been parsed 822 823 if (AlwaysTenure || NeverTenure) { 824 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1, 825 err_msg("MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold)); 826 _tenuring_threshold = MaxTenuringThreshold; 827 } else { 828 // We want to smooth out our startup times for the AdaptiveSizePolicy 829 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold : 830 MaxTenuringThreshold; 831 } 832 833 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); 834 PSYoungGen* young_gen = heap->young_gen(); 835 PSOldGen* old_gen = heap->old_gen(); 836 837 // Set boundary between young_gen and old_gen 838 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), 839 "old above young"); 840 set_young_generation_boundary(young_gen->eden_space()->bottom()); 841 842 // Initialize ref handling object for scavenging. 843 MemRegion mr = young_gen->reserved(); 844 845 _ref_processor = 846 new ReferenceProcessor(mr, // span 847 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing 848 (int) ParallelGCThreads, // mt processing degree 849 true, // mt discovery 850 (int) ParallelGCThreads, // mt discovery degree 851 true, // atomic_discovery 852 NULL); // header provides liveness info 853 854 // Cache the cardtable 855 _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set()); 856 857 _counters = new CollectorCounters("PSScavenge", 0); 858 }