/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psFileBackedVirtualspace.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

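// This constructor only records the size bounds; it neither reserves nor
// commits memory. It is intended for subclasses (the adaptive-size old gen
// variant) that set up their own virtual space and then run the
// initialize_* helpers themselves.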
PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

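// When -XX:AllocateOldGenAt=<path> selects a heterogeneous heap, the old
// gen is backed by a file-mapped virtual space (e.g. on NV-DIMM storage);
// otherwise a regular in-memory PSVirtualSpace is used. In both cases the
// initial generation size is committed up front.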
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  if (ParallelArguments::is_heterogeneous_heap()) {
    _virtual_space = new PSFileBackedVirtualSpace(rs, alignment, AllocateOldGenAt);
    if (!(static_cast<PSFileBackedVirtualSpace*>(_virtual_space))->initialize()) {
      vm_exit_during_initialization("Could not map space for PSOldGen at given AllocateOldGenAt path");
    }
  } else {
    _virtual_space = new PSVirtualSpace(rs, alignment);
  }
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
    heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
    "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSCardTable* ct = heap->card_table();
  ct->resize_covered_region(cmr);

  // Verify that the start and end of this generation are the start of a card.
  // If this weren't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != heap->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL) {
    vm_exit_during_initialization("Could not allocate an old gen space");
  }

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

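// The mark-sweep decorator is only used when full collections run the
// serial mark-sweep code (UseParallelOldGC disabled), so it is compiled in
// only when serial-GC support is built.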
#if INCLUDE_SERIALGC
  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation of old generation");
  }
#endif // INCLUDE_SERIALGC

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1, _min_gen_size,
                                           _max_gen_size, virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

#if INCLUDE_SERIALGC

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now precompact the young gen.
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

#endif // INCLUDE_SERIALGC

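// "Contiguous available" counts both the free portion of the committed
// space and the still-uncommitted remainder of the reservation, since
// allocation may expand the generation on demand up to the reserved limit.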
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation. We report all successful allocations to the size policy.
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported.
  if (res != NULL) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    heap->size_policy()->tenured_allocation(word_size * HeapWordSize);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size * HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}
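
// Lock-free variant for callers that may allocate concurrently (e.g. GC
// worker threads promoting objects into the old gen); the final bump is
// done with a CAS rather than under Heap_lock.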
HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size * HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

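// Expansion strategy: first try the larger of the request and the minimum
// growth step (MinHeapDeltaBytes, or a page per NUMA node), then the
// request itself, and finally fall back to committing whatever uncommitted
// reserve remains.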
void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_up(bytes, alignment);
  size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (success && GCLocker::is_active_and_needs_gc()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The MemRegion spans
      // end -> new_end; we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
        "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    log_debug(gc)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Align down so we never uncommit a partial alignment unit.
  size_t size = align_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size + size;
    log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, size/K, new_mem_size/K);
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
    "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
    " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
    " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
    desired_free_space, used_in_bytes(), new_size, current_size,
    gen_size_limit(), min_gen_size());

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // expand() takes ExpandHeap_lock itself; shrink() only asserts that it
    // is held, so take it here.
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      size_before,
                      virtual_space()->committed_size());
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
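// The ordering below enforces that: the start array and the card table are
// resized to cover the new region first, and only then is the object
// space's end updated, which is what publishes the new area to allocators.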
void PSOldGen::post_resize() {
  // First construct a MemRegion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
    (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
    "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}
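
// The hooks below are only meaningful for the adaptive-size old gen
// subclass, which overrides them; reaching these base implementations
// indicates a bug.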
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

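// Prints the generation as [low_boundary, committed_high, reserved_high),
// followed by the object space's own summary.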
void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(virtual_space()->low_boundary()),
                p2i(virtual_space()->high()),
                p2i(virtual_space()->high_boundary()));

  st->print("  object"); object_space()->print_on(st);
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
    "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
    "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
    "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
    "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
    "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}
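
// Checks, for every object, that the start array can map an interior
// address (one word past the object's start) back to the start of the
// object, and that the object's start is marked as allocated.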
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _old_gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
    _old_gen(old_gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif