1 #ifdef USE_PRAGMA_IDENT_SRC 2 #pragma ident "@(#)generation.cpp 1.245 07/05/05 17:05:51 JVM" 3 #endif 4 /* 5 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. 6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 7 * 8 * This code is free software; you can redistribute it and/or modify it 9 * under the terms of the GNU General Public License version 2 only, as 10 * published by the Free Software Foundation. 11 * 12 * This code is distributed in the hope that it will be useful, but WITHOUT 13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 15 * version 2 for more details (a copy is included in the LICENSE file that 16 * accompanied this code). 17 * 18 * You should have received a copy of the GNU General Public License version 19 * 2 along with this work; if not, write to the Free Software Foundation, 20 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 21 * 22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 23 * CA 95054 USA or visit www.sun.com if you need additional information or 24 * have any questions. 
25 * 26 */ 27 28 # include "incls/_precompiled.incl" 29 # include "incls/_generation.cpp.incl" 30 31 Generation::Generation(ReservedSpace rs, size_t initial_size, int level) : 32 _level(level), 33 _ref_processor(NULL) { 34 if (!_virtual_space.initialize(rs, initial_size)) { 35 vm_exit_during_initialization("Could not reserve enough space for " 36 "object heap"); 37 } 38 _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(), 39 (HeapWord*)_virtual_space.high_boundary()); 40 } 41 42 GenerationSpec* Generation::spec() { 43 GenCollectedHeap* gch = GenCollectedHeap::heap(); 44 assert(0 <= level() && level() < gch->_n_gens, "Bad gen level"); 45 return gch->_gen_specs[level()]; 46 } 47 48 size_t Generation::max_capacity() const { 49 return reserved().byte_size(); 50 } 51 52 void Generation::print_heap_change(size_t prev_used) const { 53 if (PrintGCDetails && Verbose) { 54 gclog_or_tty->print(" " SIZE_FORMAT 55 "->" SIZE_FORMAT 56 "(" SIZE_FORMAT ")", 57 prev_used, used(), capacity()); 58 } else { 59 gclog_or_tty->print(" " SIZE_FORMAT "K" 60 "->" SIZE_FORMAT "K" 61 "(" SIZE_FORMAT "K)", 62 prev_used / K, used() / K, capacity() / K); 63 } 64 } 65 66 // By default we get a single threaded default reference processor; 67 // generations needing multi-threaded refs discovery override this method. 
68 void Generation::ref_processor_init() { 69 assert(_ref_processor == NULL, "a reference processor already exists"); 70 assert(!_reserved.is_empty(), "empty generation?"); 71 _ref_processor = 72 new ReferenceProcessor(_reserved, // span 73 refs_discovery_is_atomic(), // atomic_discovery 74 refs_discovery_is_mt()); // mt_discovery 75 if (_ref_processor == NULL) { 76 vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); 77 } 78 } 79 80 void Generation::print() const { print_on(tty); } 81 82 void Generation::print_on(outputStream* st) const { 83 st->print(" %-20s", name()); 84 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 85 capacity()/K, used()/K); 86 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 87 _virtual_space.low_boundary(), 88 _virtual_space.high(), 89 _virtual_space.high_boundary()); 90 } 91 92 void Generation::print_summary_info() { print_summary_info_on(tty); } 93 94 void Generation::print_summary_info_on(outputStream* st) { 95 StatRecord* sr = stat_record(); 96 double time = sr->accumulated_time.seconds(); 97 st->print_cr("[Accumulated GC generation %d time %3.7f secs, " 98 "%d GC's, avg GC time %3.7f]", 99 level(), time, sr->invocations, 100 sr->invocations > 0 ? 
time / sr->invocations : 0.0); 101 } 102 103 // Utility iterator classes 104 105 class GenerationIsInReservedClosure : public SpaceClosure { 106 public: 107 const void* _p; 108 Space* sp; 109 virtual void do_space(Space* s) { 110 if (sp == NULL) { 111 if (s->is_in_reserved(_p)) sp = s; 112 } 113 } 114 GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {} 115 }; 116 117 class GenerationIsInClosure : public SpaceClosure { 118 public: 119 const void* _p; 120 Space* sp; 121 virtual void do_space(Space* s) { 122 if (sp == NULL) { 123 if (s->is_in(_p)) sp = s; 124 } 125 } 126 GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {} 127 }; 128 129 bool Generation::is_in(const void* p) const { 130 GenerationIsInClosure blk(p); 131 ((Generation*)this)->space_iterate(&blk); 132 return blk.sp != NULL; 133 } 134 135 DefNewGeneration* Generation::as_DefNewGeneration() { 136 assert((kind() == Generation::DefNew) || 137 (kind() == Generation::ParNew) || 138 (kind() == Generation::ASParNew), 139 "Wrong youngest generation type"); 140 return (DefNewGeneration*) this; 141 } 142 143 Generation* Generation::next_gen() const { 144 GenCollectedHeap* gch = GenCollectedHeap::heap(); 145 int next = level() + 1; 146 if (next < gch->_n_gens) { 147 return gch->_gens[next]; 148 } else { 149 return NULL; 150 } 151 } 152 153 size_t Generation::max_contiguous_available() const { 154 // The largest number of contiguous free words in this or any higher generation. 
155 size_t max = 0; 156 for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) { 157 size_t avail = gen->contiguous_available(); 158 if (avail > max) { 159 max = avail; 160 } 161 } 162 return max; 163 } 164 165 bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes, 166 bool not_used) const { 167 if (PrintGC && Verbose) { 168 gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe" 169 " contiguous_available: " SIZE_FORMAT 170 " promotion_in_bytes: " SIZE_FORMAT, 171 max_contiguous_available(), promotion_in_bytes); 172 } 173 return max_contiguous_available() >= promotion_in_bytes; 174 } 175 176 // Ignores "ref" and calls allocate(). 177 oop Generation::promote(oop obj, size_t obj_size, oop* ref) { 178 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); 179 180 #ifndef PRODUCT 181 if (Universe::heap()->promotion_should_fail()) { 182 return NULL; 183 } 184 #endif // #ifndef PRODUCT 185 186 HeapWord* result = allocate(obj_size, false); 187 if (result != NULL) { 188 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); 189 return oop(result); 190 } else { 191 GenCollectedHeap* gch = GenCollectedHeap::heap(); 192 return gch->handle_failed_promotion(this, obj, obj_size, ref); 193 } 194 } 195 196 oop Generation::par_promote(int thread_num, 197 oop obj, markOop m, size_t word_sz) { 198 // Could do a bad general impl here that gets a lock. But no. 199 ShouldNotCallThis(); 200 return NULL; 201 } 202 203 void Generation::par_promote_alloc_undo(int thread_num, 204 HeapWord* obj, size_t word_sz) { 205 // Could do a bad general impl here that gets a lock. But no. 206 guarantee(false, "No good general implementation."); 207 } 208 209 Space* Generation::space_containing(const void* p) const { 210 GenerationIsInReservedClosure blk(p); 211 // Cast away const 212 ((Generation*)this)->space_iterate(&blk); 213 return blk.sp; 214 } 215 216 // Some of these are mediocre general implementations. 
// Should be
// overridden to get better performance.

// Records the block start for _p from the first space whose reserved
// region contains it.
class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

// Generic block_start: linear search over this generation's spaces.
HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

// Records the block size for _p from the first space whose reserved
// region contains it (size == 0 means "not found yet").
class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

// Generic block_size: linear search over this generation's spaces.
size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

// Answers whether the block at _p is an object, for the first space
// whose reserved region contains it.
class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

// Generic block_is_obj: linear search over this generation's spaces.
bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

// Applies an OopClosure to each space, restricted to MemRegion mr.
class GenerationOopIterateClosure : public SpaceClosure {
 public:
  OopClosure* cl;
  MemRegion mr;
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(OopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};

// Iterate oops over the whole reserved region of this generation.
void Generation::oop_iterate(OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}

// Iterate oops restricted to the given region.
void Generation::oop_iterate(MemRegion mr, OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}

// Delegate younger-refs iteration over sp to the shared remembered set.
void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

// Applies an ObjectClosure to every object in each space.
class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

// Iterate all objects in this generation.
void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}

// Phase 1 of mark-compact: compute forwarding addresses for each
// compactible space in order.
void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

// Phase 3 of mark-compact: update pointers to forwarded objects.
void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

// Phase 4 of mark-compact: slide objects to their new locations.
void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}

// A generation covered by a card-marking remembered set.  Sets up the
// shared block-offset table and resizes the remembered set's covered
// region to the initially committed part.
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset)
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  // The BOT covers the whole reserved range but only commits entries for
  // the initially committed part.
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
}


// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}


// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}


// Full collection via mark-sweep.  "full", "size" and "is_tlab" are
// unused here; only clear_all_soft_refs is forwarded.
void OneContigSpaceCardGeneration::collect(bool full,
                                           bool clear_all_soft_refs,
                                           size_t size,
                                           bool is_tlab) {
  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  SpecializationStats::print();
}

// Grow the generation and then allocate word_size words.  In the
// parallel case, expansion is retried under ParGCRareEvent_lock until
// either the allocation succeeds or no uncommitted space remains.
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        // Optional test/diagnostic delay between expansion and allocation.
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if ( result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

// Grow by at least "bytes", preferring the larger page-aligned
// expand_bytes delta; falls back to growing to the reserved limit.
void OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    grow_to_reserved();
  }
  if (GC_locker::is_active()) {
    // Expansion stood in for a GC that the GC_locker prevented.
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}


// Shrink the committed size by "bytes", rounded down to page alignment.
void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}


size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}


size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}


size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

// Free space in the single space plus whatever is still uncommitted.
size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

// Commit "bytes" more of the reserved range and grow all side
// structures (card table, BOT) BEFORE growing the space itself, since
// the space's new end must already be covered by them.
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    MemRegion mangle_region(_the_space->end(), (HeapWord*)_virtual_space.high());
    _the_space->mangle_region(mangle_region);

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}


// Commit all remaining uncommitted space, if any.
bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

// Uncommit "bytes": the space is shrunk FIRST, then the side
// structures, mirroring (in reverse) the order used in grow_by().
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}

// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}


void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

// Single-space generation: "usedOnly" is irrelevant here.
void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

// Iterate only over objects allocated since the last GC (from the
// _last_gc watermark recorded in gc_epilogue()).
void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}

// Scan dirty cards of the single space for references into younger
// generations, bracketing the scan with the closure's generation.
void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}


void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}


// True iff the space's saved mark is still at its top, i.e. nothing
// has been allocated since save_marks().
bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

// Define oop_since_save_marks_iterate for each (possibly non-virtual)
// closure type; each definition delegates to the space and then saves
// marks again so subsequent allocations can be picked up.
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)  \
                                                                            \
void OneContigSpaceCardGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  blk->set_generation(this);                                                \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                 \
  blk->reset_generation();                                                  \
  save_marks();                                                             \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN


// Record the post-GC watermark and refresh performance counters.
void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
}

void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
}

void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("  the");
  the_space()->print_on(st);
}