/*
 * Copyright 2002-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_psScavenge.cpp.incl"

HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
CardTableExtension* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
int PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
elapsedTimer PSScavenge::_accumulated_time;
GrowableArray<markOop>* PSScavenge::_preserved_mark_stack = NULL;
GrowableArray<oop>* PSScavenge::_preserved_oop_stack = NULL;
CollectorCounters* PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
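    // (Note: should_scavenge(p, _to_space) is false both for oops outside
    // the young gen and for oops already copied into to_space since the
    // scavenge started, so a repeated visit of the same weak ref simply
    // falls through without copying the object twice.)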
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask& rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
    ParallelScavengeHeap::gc_task_manager()->workers(),
    UseDepthFirstScavengeOrder ?
        (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
      : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or exhibit any other special
// behavior. All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke()
{
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
  if (!policy->gc_time_limit_exceeded()) {
    IsGCActiveMark mark;

    bool scavenge_was_done = PSScavenge::invoke_no_policy();

    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
    if (UsePerfData)
      counters->update_full_follows_scavenge(0);
    if (!scavenge_was_done ||
        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
      if (UsePerfData)
        counters->update_full_follows_scavenge(full_follows_scavenge);

      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
      if (UseParallelOldGC) {
        PSParallelCompact::invoke_no_policy(false);
      } else {
        PSMarkSweep::invoke_no_policy(false);
      }
    }
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
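  // should_attempt_scavenge() (defined below) refuses when to_space is not
  // empty, or when the padded average of past promotions suggests the old
  // gen lacks the room to absorb this scavenge; on refusal, invoke() above
  // falls back to a full collection.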
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
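    // (Presumably because objects being copied into the labs above these
    // tops are not yet parsable, the old-to-young root scan below is
    // bounded by the saved tops rather than the live tops.)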
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
          : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
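      // All live young objects now sit in to_space or in the old gen, so
      // eden and from_space hold only garbage. Clearing them and swapping
      // the from/to labels leaves the new to_space empty for the next
      // scavenge.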
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
              " perm_gen_capacity: %d ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow,
            _tenuring_threshold,
            survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false /* full gc*/,
                                   gc_cause);

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.
        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging
      // or offlining can change the heap layout. Make sure eden is reshaped
      // if that's the case. update() will also cause adaptive NUMA chunk
      // resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
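// Unforwarding is done by PSPromotionFailedClosure (defined above), which
// calls init_mark() on every forwarded object; any marks that init_mark()
// would destroy were saved earlier by oop_promotion_failed() below.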
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks",
                             _preserved_oop_stack->length());
    }

    // Restore any saved marks.
    for (int i=0; i < _preserved_oop_stack->length(); i++) {
      oop obj = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }

    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent memory leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc; // Lock and retest
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
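  // For example (illustrative numbers only): with a padded promotion
  // average of 64M and 48M used in the young gen, the estimate below is
  // MIN2(64M, 48M) = 48M, so the scavenge is attempted only if the old
  // gen has more than 48M free.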
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    NULL,                       // is_alive_non_header
    ParallelGCThreads,
    ParallelRefProcEnabled);

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}