/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                    "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
      (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
          (HeapWord*)_virtual_space.high_boundary());
}

GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}

size_t Generation::max_capacity() const {
  return reserved().byte_size();
}

void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// By default we get a single-threaded default reference processor;
// generations needing multi-threaded reference processing or discovery
// override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}

void Generation::print() const { print_on(tty); }

void Generation::print_on(outputStream* st) const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}

void Generation::print_summary_info() { print_summary_info_on(tty); }

void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GCs, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}

// Utility iterator classes

class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};

class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};

bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}

DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}

Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}

size_t Generation::max_contiguous_available() const {
  // The largest number of contiguous free words in this or any higher generation.
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}

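// max_contiguous_available() above feeds this check: a promotion attempt is
// deemed safe only if a single generation at or above this one could absorb
// the worst-case promotion in one contiguous chunk.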
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool   res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
      res ? "" : " not", available, res ? ">=" : "<",
      max_promotion_in_bytes);
  }
  return res;
}

// Allocate space for "obj" in this generation and copy it there; on
// allocation failure, let the heap handle the failed promotion.
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}

Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}

// Some of these are mediocre general implementations.  Should be
// overridden to get better performance.
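// A generation backed by a single contiguous space can instead forward
// these queries directly to that space (compare
// OneContigSpaceCardGeneration::object_iterate() further below).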

class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

class GenerationOopIterateClosure : public SpaceClosure {
 public:
  OopClosure* cl;
  MemRegion mr;
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(OopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};

void Generation::oop_iterate(OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}

void Generation::oop_iterate(MemRegion mr, OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}

void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}

class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}

void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}

CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset)
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);

  // Verify that the start and end of this generation is the start of a card.
  // If this weren't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
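  //
  // For illustration: with the usual 512-byte cards, a generation boundary
  // that is not card-size aligned would leave a single card covering the
  // tail of this generation and the head of the next, so clearing or
  // dirtying that card on behalf of one generation would corrupt the
  // remembered-set state of the other.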
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check the very end of the heap: probing one past the end
    // would trip the "probing off the end" assert.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
}

bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
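    //
    // (For illustration: with 4K pages, a request of SIZE_MAX - 1 rounds up
    // past SIZE_MAX and wraps to 0; rounding down instead yields the largest
    // page-aligned request that can still be made.)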
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}


// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}


// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}


void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  SpecializationStats::print();
}

HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}



void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}


size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}


size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}


size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
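    // Resize the covering card table and block offset array before moving
    // the space's end, so the space never extends past the metadata that
    // describes it.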
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}


bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}

// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}


// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up;
// see CR 6897789.
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}

void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}


void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}


bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
                                                                                \
void OneContigSpaceCardGeneration::                                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
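
// For one (closure type, suffix) pair, say (ScanClosure, _nv), the macro
// above expands to, roughly:
//
//   void OneContigSpaceCardGeneration::
//   oop_since_save_marks_iterate_nv(ScanClosure* blk) {
//     blk->set_generation(this);
//     _the_space->oop_since_save_marks_iterate_nv(blk);
//     blk->reset_generation();
//     save_marks();
//   }
//
// ALL_SINCE_SAVE_MARKS_CLOSURES supplies the actual pairs; the pair shown
// here is just one illustrative instantiation.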


void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
}

void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("   the");
  the_space()->print_on(st);
}