/*
 * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_psScavenge.cpp.incl"

HeapWord*               PSScavenge::_to_space_top_before_gc = NULL;
int                     PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*     PSScavenge::_ref_processor = NULL;
CardTableExtension*     PSScavenge::_card_table = NULL;
bool                    PSScavenge::_survivor_overflow = false;
int                     PSScavenge::_tenuring_threshold = 0;
HeapWord*               PSScavenge::_young_generation_boundary = NULL;
elapsedTimer            PSScavenge::_accumulated_time;
GrowableArray<markOop>* PSScavenge::_preserved_mark_stack = NULL;
GrowableArray<oop>*     PSScavenge::_preserved_oop_stack = NULL;
CollectorCounters*      PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
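    // should_scavenge() skips referents that are no longer in the young
    // generation, as well as referents already copied into to_space during
    // this scavenge; everything else is copied (or simply re-pointed at its
    // forwardee) by copy_and_push_safe_barrier().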
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _rp_task;
  uint         _work_id;
public:
  PSRefProcTaskProxy(ProcessTask& rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
    ParallelScavengeHeap::gc_task_manager()->workers(),
    UseDepthFirstScavengeOrder ?
      (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
    : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke()
{
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
  if (!policy->gc_time_limit_exceeded()) {
    IsGCActiveMark mark;

    bool scavenge_was_done = PSScavenge::invoke_no_policy();

    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
    if (UsePerfData)
      counters->update_full_follows_scavenge(0);
    if (!scavenge_was_done ||
        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
      if (UsePerfData)
        counters->update_full_follows_scavenge(full_follows_scavenge);

      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
      if (UseParallelOldGC) {
        PSParallelCompact::invoke_no_policy(false);
      } else {
        PSMarkSweep::invoke_no_policy(false);
      }
    }
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
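  // should_attempt_scavenge() consults the adaptive size policy; the scavenge
  // is skipped if to_space is not empty or if the expected promotion volume
  // would not fit in the old generation's free space.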
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
          : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
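      // Everything live in eden and from_space has been copied into to_space
      // or promoted, so both spaces can be cleared; swapping then makes the
      // populated to_space the new from_space.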
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      bool free_ratio_in_effect = false;
      if ((UseFreeRatioForParallelGC ||
           (UseFreeRatioOnlyInSystemGCForParallelGC &&
            gc_cause == GCCause::_java_lang_system_gc))) {
        ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
        free_ratio_in_effect = heap->try_to_shrink_by_free_ratio(false);
      }

      if (!free_ratio_in_effect && UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
                                " perm_gen_capacity: %d ",
                                old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
                                perm_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
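        // Recompute the desired generation free space based on this
        // collection's statistics. Explicit System.gc() requests are excluded
        // unless UseAdaptiveSizePolicyWithSystemGC is set.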
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(
            young_gen->used_in_bytes(),
            young_gen->eden_space()->used_in_bytes(),
            old_gen->used_in_bytes(),
            perm_gen->used_in_bytes(),
            young_gen->eden_space()->capacity_in_bytes(),
            old_gen->max_gen_size(),
            max_eden_size,
            false /* full gc */,
            gc_cause);

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With a NUMA eden, CPU hotplugging
      // or offlining can change the heap layout, so make sure eden is
      // reshaped if that is the case. update() also performs adaptive NUMA
      // chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks",
                             _preserved_oop_stack->length());
    }

    // Restore any saved marks.
    for (int i=0; i < _preserved_oop_stack->length(); i++) {
      oop obj      = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }

    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent mem leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
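// Only marks that cannot be reconstructed from the default header (for
// example, marks carrying a hash code or lock state) need to be pushed onto
// the preserved stacks here.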
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc; // Lock and retest
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      _preserved_mark_stack =
        new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack =
        new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
                           " padded_average_promoted " SIZE_FORMAT
                           " free in old gen " SIZE_FORMAT,
                           (size_t) policy->average_promoted_in_bytes(),
                           (size_t) policy->padded_average_promoted_in_bytes(),
                           old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
                             " than maximum promotion = " SIZE_FORMAT,
                             young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ?
                                    InitialTenuringThreshold :
                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    NULL,                       // is_alive_non_header
    ParallelGCThreads,
    ParallelRefProcEnabled);

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}