/*
 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"


HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
int                        PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
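  // "Alive" here is relative to the current scavenge: an object is
  // treated as live if it lies outside the young generation (a
  // scavenge never reclaims it) or if it has already been forwarded
  // (copied) by this scavenge.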
public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
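// Fan the reference-processing work out to the GC worker threads: one
// proxy task per active worker and, when the task marks oops alive and
// more than one worker is active, one StealTask per worker so that
// idle workers can steal queued work until the terminator agrees that
// everything has been drained.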
void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}

void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special
// behavior. All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
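//
// Within invoke_no_policy() the work runs in phases: parallel root
// scanning and copying (with work stealing), reference processing,
// string table cleanup, and finally the bookkeeping (survivor space
// swap, adaptive size policy feedback, counter updates). It returns
// false if the scavenge was skipped (GC locker active, or the policy
// predicted failure) or if a promotion failure occurred.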
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
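    // (The start array records where objects begin within each card,
    // letting the card-scanning code find the first object overlapping
    // a dirty card; a stale array would make old->young root scanning
    // miss or mis-scan objects.)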
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
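    // Each GC worker owns a PSPromotionManager (with its own promotion
    // LABs and work queue); the one fetched here belongs to the VM
    // thread and is used by the serial reference-processing and string
    // table closures below.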
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      uint stripe_total = active_workers;
      for(uint i=0; i < stripe_total; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    if (!JavaObjectsInPerm) {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings
      StringTable::unlink(&_is_alive_closure);
      // Process the remaining live ones
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::oops_do(&root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
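    // (minor_collection_end() stops the timer started by
    // minor_collection_begin() and folds this pause into the policy's
    // minor pause and GC cost averages.)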
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
                                " perm_gen_capacity: %d ",
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
                                perm_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
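        // compute_generation_free_space() derives new desired eden and
        // promo sizes from the policy's pause and throughput goals; it
        // is skipped for explicit System.gc() unless
        // -XX:+UseAdaptiveSizePolicyWithSystemGC is set.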
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false /* full gc*/,
                                   gc_cause,
                                   heap->collector_policy());

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging
      // or offlining can change the heap layout; make sure eden is reshaped
      // if that's the case. update() will also cause adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
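        // The lines below print the usual "before->after(capacity)"
        // usage summaries, first for the young gen and then for the
        // whole heap.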
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
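    // ThreadCritical is a VM-wide lock, so the two pushes below happen
    // atomically and the oop and mark stacks cannot get out of step
    // even when several GC workers hit promotion failures at once.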
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT,
                             young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
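  // The reference processor's span is the young generation: discovery
  // during a scavenge is confined to it, since nothing outside the
  // young gen is collected.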
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL,                       // header provides liveness info
                           false);                     // next field updates do not need write barrier

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}