/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

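  // Commit the initial generation size up front; if that fails, VM
  // initialization cannot continue.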
  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
    heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
    "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  BarrierSet* bs = heap->barrier_set();

  bs->resize_covered_region(cmr);

  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);

  // Verify that the start and end of this generation are card aligned.
  // If they were not, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool  PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

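// Serial full GC (PSMarkSweep) support: prepare this generation for
// compaction before doing the same for the young gen.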
void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

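// Bytes free in the committed part of the space plus bytes that could still
// be committed from the remaining reserved space.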
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation. We report all successful allocations to the size policy
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
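  // Optional delay (GCExpandToAllocateDelayMillis) between expanding the
  // generation and retrying the allocation.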
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes  = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

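  // Expansion policy: first try the minimum expansion step (at least
  // MinHeapDeltaBytes, NUMA-adjusted) when it exceeds the request, then the
  // requested amount itself, and finally whatever reserved space remains.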
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The MemRegion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
        "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                                       SIZE_FORMAT "K to "
                                       SIZE_FORMAT "K",
                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    assert_lock_strong(ExpandHeap_lock);
    virtual_space()->shrink_by(bytes);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + bytes;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                                         SIZE_FORMAT "K to "
                                         SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
}

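// Resize the committed size of the generation so that roughly
// desired_free_space bytes are free beyond what is currently used, clamped to
// [min_gen_size(), gen_size_limit()] and rounded up to the space alignment.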
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      desired_free_space, used_in_bytes(), new_size, current_size,
      gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does. Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                  "collection: %d "
                  "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                  heap->total_collections(),
                  size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
    (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
    "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
                capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
                capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(virtual_space()->low_boundary()),
                p2i(virtual_space()->high()),
                p2i(virtual_space()->high_boundary()));

  st->print("  object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" "  SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "("  SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
    "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
    "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
    "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
    "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}
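
// Walks every object in the generation and checks that the ObjectStartArray
// can map an interior address back to the object's start and that the
// object's block is marked as allocated.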
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check( this, &_start_array );
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif