/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
                            (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                        (HeapWord*)_virtual_space.high_boundary());
}

GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}

size_t Generation::max_capacity() const {
  return reserved().byte_size();
}

void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs discovery override this method.
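// The discovery span covers this generation's entire reserved region;
// the atomicity and MT-discovery policy are supplied by the subclass via
// the refs_discovery_is_atomic()/refs_discovery_is_mt() virtuals.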
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor =
    new ReferenceProcessor(_reserved,                  // span
                           refs_discovery_is_atomic(), // atomic_discovery
                           refs_discovery_is_mt());    // mt_discovery
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}

void Generation::print() const { print_on(tty); }

void Generation::print_on(outputStream* st) const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               _virtual_space.low_boundary(),
               _virtual_space.high(),
               _virtual_space.high_boundary());
}

void Generation::print_summary_info() { print_summary_info_on(tty); }

void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}

// Utility iterator classes

class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};

class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};

bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}

DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
         "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}

Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}

size_t Generation::max_contiguous_available() const {
  // The largest number of contiguous free words in this or any higher generation.
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}

bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
                                           bool not_used) const {
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
                           " contiguous_available: " SIZE_FORMAT
                           " promotion_in_bytes: " SIZE_FORMAT,
                           max_contiguous_available(), promotion_in_bytes);
  }
  return max_contiguous_available() >= promotion_in_bytes;
}

// Allocates space in this generation and copies "obj" into it.
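// On allocation failure, promotion falls back to
// GenCollectedHeap::handle_failed_promotion(), which may expand the heap
// before retrying the copy.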
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}

Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}

// Some of these are mediocre general implementations.  Should be
// overridden to get better performance.

class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

class GenerationOopIterateClosure : public SpaceClosure {
 public:
  OopClosure* cl;
  MemRegion mr;
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(OopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};

void Generation::oop_iterate(OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}

void Generation::oop_iterate(MemRegion mr, OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}

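// Delegate to the shared remembered set: scan the dirty cards covering
// "sp" and apply "cl" to any references into younger generations.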
void
Generation::younger_refs_in_space_iterate(Space* sp,
                                          OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}

class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}

void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}

CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset)
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
}

bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}


// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}


// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}


void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  SpecializationStats::print();
}

HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
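        // (Expansion may have raced with other allocating threads, so only
        // bail out once the uncommitted remainder cannot cover the request.)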
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}


void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}


size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}


size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}


size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to " SIZE_FORMAT "K",
                             name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}


bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
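  // (its covered region must stay in sync with the committed region)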
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                           name(), old_mem_size/K, new_mem_size/K);
  }
}

// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}


// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up,
// see CR 6897789.
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}

void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}


void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}


bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
                                                                \
void OneContigSpaceCardGeneration::                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {  \
  blk->set_generation(this);                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);     \
  blk->reset_generation();                                      \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN


void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
}

void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" the");
  the_space()->print_on(st);
}