#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)psOldGen.cpp 1.54 07/05/05 17:05:28 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_psOldGen.cpp.incl"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

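// Deferred-initialization constructor: the body is intentionally empty, so
// no virtual space is set up here. Presumably for use by a subclass (such
// as the adaptive-boundary ASPSOldGen) that performs the initialize_* steps
// itself.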
PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);
  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
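  // Commit the initial generation size up front; if even that much cannot
  // be committed, the VM cannot continue.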
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
    heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
    "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

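  // Make sure the card table covers exactly the committed region, so that
  // stores into the old gen are tracked from the start.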
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert(_ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace();

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);

  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now precompact the young gen as well
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

void PSOldGen::move_and_update(ParCompactionManager* cm) {
  PSParallelCompact::move_and_update(cm, PSParallelCompact::old_space_id);
}

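// The largest amount that could be made contiguously available: the free
// space already committed plus everything still uncommitted in the reserved
// region (which expand() could commit on demand).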
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation. We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size, is_tlab);

  if (res == NULL) {
    res = expand_and_allocate(word_size, is_tlab);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
  assert(!is_tlab, "TLABs are not supported in PSOldGen");
  expand(word_size*HeapWordSize);
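  // When GCExpandToAllocateDelayMillis is non-zero, sleep between the
  // expansion and the allocation; presumably a diagnostic knob for widening
  // the race window with other allocators. The same delay is applied in
  // expand_and_cas_allocate() below.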
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size, is_tlab);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

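// Best-effort expansion policy: first try to grow by at least
// MinHeapDeltaBytes, then by the (aligned) requested amount, and finally by
// whatever uncommitted space is left in the reservation.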
void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
        "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                                       SIZE_FORMAT "K to "
                                       SIZE_FORMAT "K",
                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Shrink by the aligned size, not the raw request; shrinking the virtual
  // space by an unaligned amount would violate its alignment invariant.
  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                                         SIZE_FORMAT "K to "
                                         SIZE_FORMAT "K",
                      name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}

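// Resize the committed size so that roughly desired_free_space bytes are
// free, i.e. new_size = used + desired_free_space, clamped to
// [min_gen_size(), gen_size_limit()] and aligned up.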
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
      " new size: " SIZE_FORMAT " current size: " SIZE_FORMAT
      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      desired_free_space, used_in_bytes(), new_size, current_size,
      gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink() asserts that ExpandHeap_lock is already held, so take it
    // here; expand() acquires the lock itself.
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                  "collection: %d "
                  "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                  heap->total_collections(),
                  size_before, virtual_space()->committed_size());
  }
}


// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
    (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();

  // ALWAYS do this last!!
  object_space()->set_end(virtual_space_high);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
    "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

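// The following three methods only make sense for a generation whose
// reserved-space boundary can move; they are presumably overridden where
// that is supported (e.g. an adaptive-boundary subclass), so the base
// PSOldGen should never see these calls.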
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
                capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
                capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                virtual_space()->low_boundary(),
                virtual_space()->high(),
                virtual_space()->high_boundary());

  st->print("  object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" "  SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "("  SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
    "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
    "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
    "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
    "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
}
#endif

void PSOldGen::verify(bool allow_dirty) {
  object_space()->verify(allow_dirty);
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
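    // Probe one word past the object's start: object_start() on an interior
    // address must map back to the object's own header, and the start itself
    // must be recorded as an allocated block.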
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
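  // Record the current top as the high-water mark for allocations so that
  // later mangle-verification (presumably the SpaceMangler debug checks)
  // knows which part of the space should still be mangled.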
  object_space()->set_top_for_allocations();
}
#endif