/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/asPSYoungGen.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {
}


ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
                           size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {

  assert(vs->committed_size() == init_byte_size, "Cannot replace with");

  _virtual_space = vs;
}

void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
                                            size_t alignment) {
  assert(_init_gen_size != 0, "Should have a finite size");
  _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  initialize_virtual_space(rs, alignment);
  initialize_work();
}

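// Return the number of bytes by which the young gen can still grow:
// the difference between gen_size_limit() and the currently committed
// size, aligned down to the generation alignment.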
size_t ASPSYoungGen::available_for_expansion() {
  size_t current_committed_size = virtual_space()->committed_size();
  assert((gen_size_limit() >= current_committed_size),
    "generation size limit is wrong");
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  size_t result =  gen_size_limit() - current_committed_size;
  size_t result_aligned = align_size_down(result, heap->generation_alignment());
  return result_aligned;
}

// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    const size_t eden_alignment = heap->space_alignment();
    const size_t gen_alignment = heap->generation_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
      "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
      "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);

    log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
    log_trace(gc, ergo)("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
    log_trace(gc, ergo)("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
    log_trace(gc, ergo)("  gen_avail " SIZE_FORMAT " K", gen_avail/K);

    return result_aligned;
  }

  return 0;
}

// The current implementation only considers to the end of eden.
// If to_space is below from_space, to_space is not considered
// (although it could be in a future implementation).
size_t ASPSYoungGen::available_to_live() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();

  // Include any space that is committed but is not in eden.
  size_t available = pointer_delta(eden_space()->bottom(),
                                   virtual_space()->low(),
                                   sizeof(char));

  const size_t eden_capacity = eden_space()->capacity_in_bytes();
  if (eden_space()->is_empty() && eden_capacity > alignment) {
    available += eden_capacity - alignment;
  }
  return available;
}

// Similar to PSYoungGen::resize_generation() but
//  allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
//  expands at the low end of the virtual space
//  moves the boundary between the generations in order to expand
//  some additional diagnostics
// If no additional changes are required, this can be deleted
// and the changes factored back into PSYoungGen::resize_generation().
bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
  const size_t alignment = virtual_space()->alignment();
  size_t orig_size = virtual_space()->committed_size();
  bool size_changed = false;

  // There used to be a guarantee here that
  //   (eden_size + 2*survivor_size)  <= _max_gen_size
  // This requirement is enforced by the calculation of desired_size
  // below.  It may not be true on entry since the incoming eden_size
  // is not bounded by the generation size.

  assert(max_size() == reserved().byte_size(), "max gen size problem?");
  assert(min_gen_size() <= orig_size && orig_size <= max_size(),
         "just checking");

  // Adjust new generation size
  const size_t eden_plus_survivors =
    align_size_up(eden_size + 2 * survivor_size, alignment);
  size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
                             min_gen_size());
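  // For example (hypothetical numbers): with eden_size = 6M, survivor_size = 1M
  // and a 512K alignment, eden_plus_survivors is 8M; if gen_size_limit() is 7M
  // and min_gen_size() is 2M, desired_size becomes 7M, i.e. the request is
  // clamped into [min_gen_size(), gen_size_limit()].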
  assert(desired_size <= gen_size_limit(), "just checking");

  if (desired_size > orig_size) {
    // Grow the generation
    size_t change = desired_size - orig_size;
    HeapWord* prev_low = (HeapWord*) virtual_space()->low();
    if (!virtual_space()->expand_by(change)) {
      return false;
    }
    if (ZapUnusedHeapArea) {
      // Mangle newly committed space immediately because it
      // can be done here more simply than after the new
      // spaces have been computed.
      HeapWord* new_low = (HeapWord*) virtual_space()->low();
      assert(new_low < prev_low, "Did not grow");

      MemRegion mangle_region(new_low, prev_low);
      SpaceMangler::mangle_region(mangle_region);
    }
    size_changed = true;
  } else if (desired_size < orig_size) {
    size_t desired_change = orig_size - desired_size;

    // How much is available for shrinking.
    size_t available_bytes = limit_gen_shrink(desired_change);
    size_t change = MIN2(desired_change, available_bytes);
    virtual_space()->shrink_by(change);
    size_changed = true;
  } else {
    if (orig_size == gen_size_limit()) {
      log_trace(gc)("ASPSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K);
    } else if (orig_size == min_gen_size()) {
      log_trace(gc)("ASPSYoung generation size at minimum: " SIZE_FORMAT "K", orig_size/K);
    }
  }

  if (size_changed) {
    reset_after_change();
    log_trace(gc)("ASPSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
                  orig_size/K, virtual_space()->committed_size()/K);
  }

  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
            virtual_space()->committed_size() == max_size(), "Sanity");

  return true;
}

// Similar to PSYoungGen::resize_spaces() but
//  eden always starts at the low end of the committed virtual space
//  current implementation does not allow holes between the spaces
//  _young_generation_boundary has to be reset because it changes,
//  so there is additional verification

void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
                                 size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0 && requested_survivor_size > 0,
         "just checking");

  space_invariants();

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: "
                      SIZE_FORMAT
                      ", requested_survivor_size: " SIZE_FORMAT ")",
                      requested_eden_size, requested_survivor_size);
  log_trace(gc, ergo)("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                      SIZE_FORMAT,
                      p2i(eden_space()->bottom()),
                      p2i(eden_space()->end()),
                      pointer_delta(eden_space()->end(), eden_space()->bottom(), sizeof(char)));
  log_trace(gc, ergo)("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                      SIZE_FORMAT,
                      p2i(from_space()->bottom()),
                      p2i(from_space()->end()),
                      pointer_delta(from_space()->end(), from_space()->bottom(), sizeof(char)));
  log_trace(gc, ergo)("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                      SIZE_FORMAT,
                      p2i(to_space()->bottom()),
                      p2i(to_space()->end()),
                      pointer_delta(  to_space()->end(), to_space()->bottom(), sizeof(char)));

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    log_trace(gc, ergo)("    capacities are the right sizes, returning");
    return;
  }

  char* eden_start = (char*)virtual_space()->low();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  assert(eden_start < from_start, "Cannot push into from_space");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to

    log_trace(gc, ergo)("  Eden, from, to:");

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end,
                                    (char*)requested_survivor_size,
                                    sizeof(char));
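    // (pointer_delta() with sizeof(char) is plain byte arithmetic here:
    // to_start is requested_survivor_size bytes below the high end of
    // the committed virtual space.)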

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size =
        pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(),
        "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    log_trace(gc, ergo)("    [eden_start .. eden_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)("    [from_start .. from_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
    log_trace(gc, ergo)("    [  to_start ..   to_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta(  to_end,   to_start, sizeof(char)));
  } else {
    // Eden, to, from
    log_trace(gc, ergo)("  Eden, to, from:");

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end   = (char*)pointer_delta(virtual_space()->high(),
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    to_end   = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // If the space sizes are to be increased by several times, 'to_start'
    // may end up below the start of the young generation. In this case
    // 'to_start' has to be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
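    // Keep to_space at or above the (possibly enlarged) eden end so that
    // the spaces do not overlap.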
    to_start = MAX2(to_start, eden_end);

    log_trace(gc, ergo)("    [eden_start .. eden_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)("    [  to_start ..   to_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta(  to_end,   to_start, sizeof(char)));
    log_trace(gc, ergo)("    [from_start .. from_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
  }


  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)

  // For logging block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to   = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a numa space is not mangled,
    // so as not to prematurely bind its address range to the wrong
    // memory (i.e., we don't want the GC thread to be the first to
    // touch the memory).  The survivor spaces are not numa
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // Since the spaces are not mangled in the calls to initialize()
    // below, verify here that they are already correctly mangled.
    // The check must be done before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.

    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
      to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
    to_space()->initialize(toMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  PSScavenge::set_young_generation_boundary(eden_space()->bottom());

  assert(from_space()->top() == old_from_top, "from top changed!");

  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: "
                "collection: %d "
                "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                ParallelScavengeHeap::heap()->total_collections(),
                old_from, old_to,
                from_space()->capacity_in_bytes(),
                to_space()->capacity_in_bytes());

  space_invariants();
}
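
// Recompute cached state after the committed size or boundaries of the
// generation have changed: the reserved region and the reference
// processor's span, eden's bottom (which tracks the low end of the
// virtual space), the young generation boundary, and the card table's
// covered region.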
void ASPSYoungGen::reset_after_change() {
  assert_locked_or_safepoint(Heap_lock);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::reference_processor()->set_span(_reserved);

  HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
  HeapWord* eden_bottom = eden_space()->bottom();
  if (new_eden_bottom != eden_bottom) {
    MemRegion eden_mr(new_eden_bottom, eden_space()->end());
    eden_space()->initialize(eden_mr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);
    PSScavenge::set_young_generation_boundary(eden_space()->bottom());
  }
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);

  space_invariants();
}