/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
unsigned int        PSMarkSweep::_total_invocations = 0;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.
// For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
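  // If the young gen is completely empty afterwards, the mod-ref barrier
  // covering the perm and old gens can simply be cleared; otherwise it has
  // to be invalidated (see the BarrierSet handling further below).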
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
                              " perm_gen_capacity: %d ",
                              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   young_gen->eden_space()->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   young_gen->eden_space()->capacity_in_bytes(),
                                                   old_gen->max_gen_size(),
                                                   max_eden_size,
                                                   true /* full gc */,
                                                   gc_cause,
                                                   heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection. A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation. Let the
        // young generation resizing happen at the minor collections.
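        // (With UseAdaptiveSizePolicy enabled, PSScavenge resizes the
        // young generation at the end of each scavenge.)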
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do the perm gen after the heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
  _revisit_klass_stack.clear(true);
  _revisit_mdo_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}


void PSMarkSweep::mark_sweep_phase2() {
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());  // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
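  // (Note that this is the opposite constraint from phase2, which must
  // traverse perm_gen last; see the comment in mark_sweep_phase2() above.)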
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}