/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_generation.cpp.incl"

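// Initialize the generation's virtual space over the given reservation,
// committing the initial chunk, and record the full reserved region.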
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                    "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
      (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
          (HeapWord*)_virtual_space.high_boundary());
}

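// The GenerationSpecs are owned by the collected heap; look ours up by level.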
GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}

size_t Generation::max_capacity() const {
  return reserved().byte_size();
}

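// Prints "prev_used->used(capacity)", in bytes when fully verbose,
// otherwise in kilobytes.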
void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// By default we get a single-threaded default reference processor;
// generations needing multi-threaded refs discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor =
    new ReferenceProcessor(_reserved,                  // span
                           refs_discovery_is_atomic(), // atomic_discovery
                           refs_discovery_is_mt());    // mt_discovery
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}

void Generation::print() const { print_on(tty); }

void Generation::print_on(outputStream* st)  const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}

void Generation::print_summary_info() { print_summary_info_on(tty); }

void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GCs, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}

// Utility iterator classes

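// Each closure below records the first space it visits that contains the
// given pointer; space_iterate() applies it to every space in the generation.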
class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};

class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};

bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}

DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}

Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}

size_t Generation::max_contiguous_available() const {
  // The largest number of contiguous free words in this or any higher generation.
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}

bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
                                           bool not_used) const {
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
                " contiguous_available: " SIZE_FORMAT
                " promotion_in_bytes: " SIZE_FORMAT,
                max_contiguous_available(), promotion_in_bytes);
  }
  return max_contiguous_available() >= promotion_in_bytes;
}

// Default implementation: copy the object into space obtained from allocate().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}

Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}

// Some of these are mediocre general implementations.  Should be
// overridden to get better performance.

class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

class GenerationOopIterateClosure : public SpaceClosure {
 public:
  OopClosure* cl;
  MemRegion mr;
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(OopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};

void Generation::oop_iterate(OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}

void Generation::oop_iterate(MemRegion mr, OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}

void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}

class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}

void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}

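// In addition to the basic generation setup, a CardGeneration creates the
// shared block offset table and tells the remembered set to cover the
// initially committed region.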
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset)
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL) {
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  }
  // Verify that the start and end of this generation are card aligned.  If
  // they were not, a single card could span more than one generation, which
  // would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap, since probing past the end
    // of the reserved region would trip an assert.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
}

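// Grow the committed size of the generation.  Expansion is attempted in
// decreasing order of ambition: first by expand_bytes (if it exceeds the
// request), then by the requested bytes, and finally to whatever is left
// of the reservation.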
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}


// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}


// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}


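// Full mark-sweep collection of this generation.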
void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  SpecializationStats::print();
}

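// Expand the generation and then allocate.  In the parallel case the
// expand/allocate cycle is retried under ParGCRareEvent_lock until the
// allocation succeeds or the virtual space has nothing left to commit.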
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}


void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}


size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}


size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}


size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

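// Commit more of the virtual space and grow the dependent data structures to
// match: the card table and the shared block offset array must cover the new
// committed region before the space's end is moved up.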
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}


bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

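// Uncommit part of the virtual space, then shrink the space, the shared
// block offset array, and the card table to match.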
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}

// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}


// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up;
// see CR 6897789.
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}

void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}


void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}


bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

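// The macro below generates one oop_since_save_marks_iterate definition per
// known closure type: each iterates over oops in objects allocated since the
// last save_marks() and then advances the saved mark to the current top.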
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
                                                                                \
void OneContigSpaceCardGeneration::                                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN


void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
}

void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("   the");
  the_space()->print_on(st);
}