1 #ifdef USE_PRAGMA_IDENT_SRC 2 #pragma ident "@(#)psScavenge.cpp 1.99 07/09/07 09:53:34 JVM" 3 #endif 4 /* 5 * Copyright 2002-2008 Sun Microsystems, Inc. All Rights Reserved. 6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 7 * 8 * This code is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License version 2 only, as 10 * published by the Free Software Foundation. 11 * 12 * This code is distributed in the hope that it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 15 * version 2 for more details (a copy is included in the LICENSE file that 16 * accompanied this code). 17 * 18 * You should have received a copy of the GNU General Public License version 19 * 2 along with this work; if not, write to the Free Software Foundation, 20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 21 * 22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 23 * CA 95054 USA or visit www.sun.com if you need additional information or 24 * have any questions. 
 *
 */


# include "incls/_precompiled.incl"
# include "incls/_psScavenge.cpp.incl"

// Static state for the parallel scavenge (young generation) collector.
HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL; // to-space top saved at GC start
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;          // Reference (soft/weak/final/phantom) processing
CardTableExtension*        PSScavenge::_card_table = NULL;             // Cached card table, set in initialize()
bool                       PSScavenge::_survivor_overflow = false;     // Did survivors overflow during last scavenge?
int                        PSScavenge::_tenuring_threshold = 0;        // Object age limit before promotion
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
elapsedTimer               PSScavenge::_accumulated_time;
GrowableArray<markOop>*    PSScavenge::_preserved_mark_stack = NULL;   // Marks saved on promotion failure
GrowableArray<oop>*        PSScavenge::_preserved_oop_stack = NULL;    // Oops whose marks were preserved
CollectorCounters*         PSScavenge::_counters = NULL;

// Liveness predicate used during reference processing: an object is
// considered alive if it is outside the young generation or has already
// been forwarded (copied) by this scavenge.
// Defined before its first use (_is_alive_closure below).
class PSIsAliveClosure: public BoolObjectClosure {
public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

// Keep-alive closure for reference processing: any referent that still
// needs scavenging is copied and pushed via the safe barrier.
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

// Drains the promotion manager's work stacks so that everything
// transitively reachable from already-copied objects is evacuated.
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

// Used after a failed promotion: restores (unforwards) the mark word of
// every object that had already been forwarded.
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

// GCTask wrapper running one worker's share of a reference-processing task.
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  // Each GC worker thread has its own promotion manager.
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

// GCTask wrapper running one worker's share of reference enqueueing.
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

// Executor that fans reference processing/enqueueing out to the
// parallel GC worker threads via the GC task manager.
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
    ParallelScavengeHeap::gc_task_manager()->workers(),
    UseDepthFirstScavengeOrder ?
        (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
      : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
  // Work stealing only helps when the task keeps discovering new
  // objects to copy (marks_oops_alive).
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
193 void PSScavenge::invoke() 194 { 195 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 196 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 197 assert(!Universe::heap()->is_gc_active(), "not reentrant"); 198 199 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); 200 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); 201 202 PSAdaptiveSizePolicy* policy = heap->size_policy(); 203 204 // Before each allocation/collection attempt, find out from the 205 // policy object if GCs are, on the whole, taking too long. If so, 206 // bail out without attempting a collection. 207 if (!policy->gc_time_limit_exceeded()) { 208 IsGCActiveMark mark; 209 210 bool scavenge_was_done = PSScavenge::invoke_no_policy(); 211 212 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); 213 if (UsePerfData) 214 counters->update_full_follows_scavenge(0); 215 if (!scavenge_was_done || 216 policy->should_full_GC(heap->old_gen()->free_in_bytes())) { 217 if (UsePerfData) 218 counters->update_full_follows_scavenge(full_follows_scavenge); 219 220 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy); 221 if (UseParallelOldGC) { 222 PSParallelCompact::invoke_no_policy(false); 223 } else { 224 PSMarkSweep::invoke_no_policy(false); 225 } 226 } 227 } 228 } 229 230 // This method contains no policy. You should probably 231 // be calling invoke() instead. 
// Attempt one scavenge of the young generation. Returns true iff the
// scavenge ran to completion without a promotion failure; returns false
// early (without collecting) if the GC locker is active or
// should_attempt_scavenge() says the scavenge would likely fail.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  // If a JNI critical section is active, the GC locker blocks collection.
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");

      GCTaskQueue* q = GCTaskQueue::create();

      // Old->young roots found via the card table, striped across workers.
      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));

      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
          : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.

      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
              " perm_gen_capacity: %d ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimial free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false  /* full gc*/,
                                   gc_cause);

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging
      // or offlining can cause the heap layout to change. Make sure eden is
      // reshaped if that's the case. Also, update() will cause adaptive
      // NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks",
                             _preserved_oop_stack->length());
    }

    // Restore any saved marks. The two stacks are parallel arrays:
    // entry i of the mark stack is the saved header for entry i of
    // the oop stack.
    for (int i=0; i < _preserved_oop_stack->length(); i++) {
      oop obj      = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }

    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent mem leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc; // Lock and retest (double-checked under ThreadCritical)
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}

// Heap-specific check of whether a scavenge is likely to succeed,
// i.e. whether the old gen likely has room for the expected promotion.
// Updates the skipped-scavenge counters as a side effect.
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
         "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

// One-time setup for the scavenger: tenuring threshold, young/old
// boundary, reference processor, card table cache, and perf counters.
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    NULL,                       // is_alive_non_header
    ParallelGCThreads,
    ParallelRefProcEnabled);

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}