/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


# include "incls/_precompiled.incl"
# include "incls/_psScavenge.cpp.incl"

HeapWord*               PSScavenge::_to_space_top_before_gc = NULL;
int                     PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*     PSScavenge::_ref_processor = NULL;
CardTableExtension*     PSScavenge::_card_table = NULL;
bool                    PSScavenge::_survivor_overflow = false;
int                     PSScavenge::_tenuring_threshold = 0;
HeapWord*               PSScavenge::_young_generation_boundary = NULL;
elapsedTimer            PSScavenge::_accumulated_time;
GrowableArray<markOop>* PSScavenge::_preserved_mark_stack = NULL;
GrowableArray<oop>*     PSScavenge::_preserved_oop_stack = NULL;
CollectorCounters*      PSScavenge::_counters = NULL;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
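    // Passing _to_space lets should_scavenge() also skip referents that have
    // already been copied into to_space during this scavenge, so each weak
    // referent is copied (or promoted) at most once.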
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};
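
// Runs the reference processing and enqueueing phases on the GC worker
// threads by wrapping them in the proxy GCTasks defined above and handing
// the resulting queue to the GCTaskManager.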
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
                 ParallelScavengeHeap::gc_task_manager()->workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or exhibit any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  bool scavenge_was_done = PSScavenge::invoke_no_policy();

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  if (UsePerfData)
    counters->update_full_follows_scavenge(0);
  if (!scavenge_was_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
    if (UsePerfData)
      counters->update_full_follows_scavenge(full_follows_scavenge);
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();
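
    // Parallel root scanning: each root group becomes a GCTask on a shared
    // queue that the GC worker threads execute; StealTask entries let idle
    // workers steal copying work until the ParallelTaskTerminator sees that
    // all work queues are empty.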
    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
                  gc_task_manager()->workers(),
                  (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);
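
      // The survived/promoted figures recorded above feed the adaptive size
      // policy below, which recomputes the survivor size, tenuring threshold
      // and eden size for the next collection cycle.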

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
              " perm_gen_capacity: %d ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false  /* full gc*/,
                                   gc_cause,
                                   heap->collector_policy());

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks",
                             _preserved_oop_stack->length());
    }

    // Restore any saved marks.
    for (int i=0; i < _preserved_oop_stack->length(); i++) {
      oop obj      = _preserved_oop_stack->at(i);
      markOop mark = _preserved_mark_stack->at(i);
      obj->set_mark(mark);
    }

    // Deallocate the preserved mark and oop stacks.
    // The stacks were allocated as CHeap objects, so
    // we must call delete to prevent mem leaks.
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc; // Lock and retest
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
   "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor = ReferenceProcessor::create_ref_processor(
    mr,                         // span
    true,                       // atomic_discovery
    true,                       // mt_discovery
    NULL,                       // is_alive_non_header
    ParallelGCThreads,
    ParallelRefProcEnabled);

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}