1 /* 2 * Copyright 2002-2010 Sun Microsystems, Inc. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 20 * CA 95054 USA or visit www.sun.com if you need additional information or 21 * have any questions. 
 *
 */


# include "incls/_precompiled.incl"
# include "incls/_psScavenge.cpp.incl"

// Static state of the parallel scavenge (young generation) collector.
HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
int                        PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
elapsedTimer               PSScavenge::_accumulated_time;
GrowableArray<markOop>*    PSScavenge::_preserved_mark_stack = NULL;
GrowableArray<oop>*        PSScavenge::_preserved_oop_stack = NULL;
CollectorCounters*         PSScavenge::_counters = NULL;

// Define before use
//
// Liveness predicate used during reference processing: an object is
// considered alive if it is outside the young generation, or has already
// been forwarded (i.e. copied) by this scavenge.
class PSIsAliveClosure: public BoolObjectClosure {
public:
  // Only do_object_b() is meaningful for this closure.
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

// Keep-alive closure for reference processing: scavenges (copies/promotes)
// any referent that still needs it, via the given promotion manager.
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;                 // young gen to-space, cached at construction
  PSPromotionManager* _promotion_manager;  // per-thread promotion manager doing the copying

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  // Templated on the slot type so it works for both full-width (oop*) and
  // compressed (narrowOop*) reference slots.
  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

// Drains the promotion manager's work stacks until no copying work remains.
class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

// After a promotion failure, resets the mark word of every forwarded object
// (removing the forwarding pointer installed during the failed scavenge).
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

// GCTask wrapper that runs one worker's share of a reference-processing
// ProcessTask, supplying that worker's promotion manager and closures.
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;  // the underlying reference-processing task
  uint _work_id;           // worker index passed to ProcessTask::work()
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  // Each GC worker thread has its own promotion manager.
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

// GCTask wrapper that runs one worker's share of a reference EnqueueTask.
class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

// Executor that fans reference processing / enqueueing out over the
// parallel GC worker threads.
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  // Terminator coordinates the StealTasks over whichever task-queue set
  // matches the configured scavenge order (depth- vs breadth-first).
  ParallelTaskTerminator terminator(
    ParallelScavengeHeap::gc_task_manager()->workers(),
    UseDepthFirstScavengeOrder ?
        (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
      : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
  // Work stealing only pays off when the task actually marks oops alive
  // and there is more than one GC thread.
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
190 void PSScavenge::invoke() { 191 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); 192 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 193 assert(!Universe::heap()->is_gc_active(), "not reentrant"); 194 195 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); 196 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); 197 198 PSAdaptiveSizePolicy* policy = heap->size_policy(); 199 IsGCActiveMark mark; 200 201 bool scavenge_was_done = PSScavenge::invoke_no_policy(); 202 203 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters(); 204 if (UsePerfData) 205 counters->update_full_follows_scavenge(0); 206 if (!scavenge_was_done || 207 policy->should_full_GC(heap->old_gen()->free_in_bytes())) { 208 if (UsePerfData) 209 counters->update_full_follows_scavenge(full_follows_scavenge); 210 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy); 211 CollectorPolicy* cp = heap->collector_policy(); 212 const bool clear_all_softrefs = cp->should_clear_all_soft_refs(); 213 214 if (UseParallelOldGC) { 215 PSParallelCompact::invoke_no_policy(clear_all_softrefs); 216 } else { 217 PSMarkSweep::invoke_no_policy(clear_all_softrefs); 218 } 219 } 220 } 221 222 // This method contains no policy. You should probably 223 // be calling invoke() instead. 
// Performs one scavenge (minor collection) with no invocation policy.
// Returns false if the collection was skipped entirely (GC locker held, or
// should_attempt_scavenge() vetoed it); otherwise returns true exactly when
// no promotion failure occurred.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Timestamps for the optional PrintGCTaskTimeStamps report below.
  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  // Bail out if the GC locker is held (e.g. a JNI critical region is active).
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // The tenuring threshold must be consistent with the NeverTenure /
  // AlwaysTenure flags (see initialize()).
  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    // RAII trace/stat objects: construction starts, destruction at the end
    // of this scope stops, the corresponding timers and counters.
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      // Build the queue of root-scanning tasks and execute them on the GC
      // worker threads; execute_and_wait() blocks until all are done.
      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
          : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (PSResizeByFreeRatioWithSystemGC &&
          gc_cause == GCCause::_java_lang_system_gc) {
        // NOTE(review): this inner 'heap' shadows the outer local of the
        // same name; both refer to the same ParallelScavengeHeap.
        ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
        heap->resize_by_free_ratio(false);

      } else if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
              " perm_gen_capacity: %d ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimial free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false /* full gc*/,
                                   gc_cause,
                                   heap->collector_policy());

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      } else {
        // Neither free-ratio-on-System.gc nor adaptive sizing applies;
        // fall back to a free-ratio resize.
        ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
        heap->resize_by_free_ratio(false);
      }

      // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
      // cause the change of the heap layout. Make sure eden is reshaped if that's the case.
      // Also update() will case adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks",
                             _preserved_oop_stack->length());
    }

    // Restore any saved marks. The two stacks are maintained in lockstep
    // by oop_promotion_failed(), so index i pairs an oop with its mark.
    for (int i=0; i < _preserved_oop_stack->length(); i++) {
      oop obj = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }

    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent mem leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not neccessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  // Lazily allocate the preservation stacks using double-checked locking
  // under ThreadCritical: check, lock, then re-check before allocating.
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc; // Lock and retest
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}

// Decides whether a scavenge is worth attempting: vetoes it when to-space
// is not empty (unless ScavengeWithObjectsInToSpace) or when the estimated
// promotion volume would not fit in the old gen's free space. Updates the
// skip counters/perf data accordingly.
bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC(). If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
   "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

// One-time setup: fixes the initial tenuring threshold, records the
// young-generation boundary, and creates the reference processor,
// card table cache, and collector counters.
void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    NULL,                       // is_alive_non_header
    ParallelGCThreads,
    ParallelRefProcEnabled);

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}