/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/mutableNUMASpace.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

PSYoungGen::PSYoungGen(size_t initial_size, size_t min_size, size_t max_size) :
  _reserved(),
  _virtual_space(NULL),
  _eden_space(NULL),
  _from_space(NULL),
  _to_space(NULL),
  _eden_mark_sweep(NULL),
  _from_mark_sweep(NULL),
  _to_mark_sweep(NULL),
  _init_gen_size(initial_size),
  _min_gen_size(min_size),
  _max_gen_size(max_size),
  _gen_counters(NULL),
  _eden_counters(NULL),
  _from_counters(NULL),
  _to_counters(NULL)
{}

void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  assert(_init_gen_size != 0, "Should have a finite size");
  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!virtual_space()->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  initialize_virtual_space(rs, alignment);
  initialize_work();
}

void PSYoungGen::initialize_work() {

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);

  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    SpaceMangler::mangle_region(cmr);
  }

  if (UseNUMA) {
    _eden_space = new MutableNUMASpace(virtual_space()->alignment());
  } else {
    _eden_space = new MutableSpace(virtual_space()->alignment());
  }
  _from_space = new MutableSpace(virtual_space()->alignment());
  _to_space   = new MutableSpace(virtual_space()->alignment());

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a young gen space");
  }

  // Allocate the mark sweep views of spaces
  _eden_mark_sweep =
      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
  _from_mark_sweep =
      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
  _to_mark_sweep =
      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);

  if (_eden_mark_sweep == NULL ||
      _from_mark_sweep == NULL ||
      _to_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation"
                                  " of the young generation");
  }

  // Generation Counters - generation 0, 3 subspaces
  _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                           _max_gen_size, _virtual_space);

  // Compute maximum space sizes for performance counters
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  size_t alignment = heap->space_alignment();
  size_t size = virtual_space()->reserved_size();

  size_t max_survivor_size;
  size_t max_eden_size;

  if (UseAdaptiveSizePolicy) {
    max_survivor_size = size / MinSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the minimum survivor size. The minimum survivor
    // size for UseAdaptiveSizePolicy is one alignment.
    max_eden_size = size - 2 * alignment;
  } else {
    max_survivor_size = size / InitialSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the survivor size when the generation is 100%
    // committed. The minimum survivor size for -UseAdaptiveSizePolicy
    // is dependent on the committed portion (current capacity) of the
    // generation - the less space committed, the smaller the survivor
    // space, possibly as small as an alignment. However, we are interested
    // in the case where the young generation is 100% committed, as this
    // is the point where eden reaches its maximum size. At this point,
    // the size of a survivor space is max_survivor_size.
    max_eden_size = size - 2 * max_survivor_size;
  }
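
  // For example, with a 96M reserved size, a 512K space alignment and
  // MinSurvivorRatio == 3 (illustrative values only), the adaptive branch
  // above gives max_survivor_size == 32M but max_eden_size == 95M, because
  // with UseAdaptiveSizePolicy a survivor space can shrink to a single
  // alignment.  With InitialSurvivorRatio == 8, the other branch gives
  // 12M survivors and a 72M eden.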

  _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
                                     _gen_counters);
  _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
                                     _gen_counters);
  _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
                                   _gen_counters);

  compute_initial_space_boundaries();
}

void PSYoungGen::compute_initial_space_boundaries() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  // Compute sizes
  size_t alignment = heap->space_alignment();
  size_t size = virtual_space()->committed_size();
  assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");

  size_t survivor_size = size / InitialSurvivorRatio;
  survivor_size = align_down(survivor_size, alignment);
  // ... but never less than an alignment
  survivor_size = MAX2(survivor_size, alignment);

  // Young generation is eden + 2 survivor spaces
  size_t eden_size = size - (2 * survivor_size);
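  // For example, a 24M committed size with InitialSurvivorRatio == 8 and a
  // 512K alignment (illustrative values) gives survivor_size == 3M and
  // eden_size == 18M.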

  // Now go ahead and set 'em.
  set_space_boundaries(eden_size, survivor_size);
  space_invariants();

  if (UsePerfData) {
    _eden_counters->update_capacity();
    _from_counters->update_capacity();
    _to_counters->update_capacity();
  }
}

void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
  assert(eden_size < virtual_space()->committed_size(), "just checking");
  assert(eden_size > 0  && survivor_size > 0, "just checking");

  // Initial layout is Eden, to, from. After swapping survivor spaces,
  // that leaves us with Eden, from, to, which is step one in our two
  // step resize-with-live-data procedure.
  char *eden_start = virtual_space()->low();
  char *to_start   = eden_start + eden_size;
  char *from_start = to_start   + survivor_size;
  char *from_end   = from_start + survivor_size;

  assert(from_end == virtual_space()->high(), "just checking");
  assert(is_object_aligned(eden_start), "checking alignment");
  assert(is_object_aligned(to_start),   "checking alignment");
  assert(is_object_aligned(from_start), "checking alignment");

  MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
  MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
  MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);

  eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
    to_space()->initialize(to_mr  , true, ZapUnusedHeapArea);
  from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
}

#ifndef PRODUCT
void PSYoungGen::space_invariants() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
  guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
  guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");

  // Relationship of spaces to each other
  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
  guarantee(eden_start < eden_end, "eden space consistency");
  guarantee(from_start < from_end, "from space consistency");
  guarantee(to_start < to_end, "to space consistency");

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    guarantee(eden_end <= from_start, "eden/from boundary");
    guarantee(from_end <= to_start,   "from/to boundary");
    guarantee(to_end <= virtual_space()->high(), "to end");
  } else {
    // Eden, to, from
    guarantee(eden_end <= to_start, "eden/to boundary");
    guarantee(to_end <= from_start, "to/from boundary");
    guarantee(from_end <= virtual_space()->high(), "from end");
  }

  // More checks that the virtual space is consistent with the spaces
  assert(virtual_space()->committed_size() >=
    (eden_space()->capacity_in_bytes() +
     to_space()->capacity_in_bytes() +
     from_space()->capacity_in_bytes()), "Committed size is inconsistent");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
  char* eden_top = (char*)eden_space()->top();
  char* from_top = (char*)from_space()->top();
  char* to_top = (char*)to_space()->top();
  assert(eden_top <= virtual_space()->high(), "eden top");
  assert(from_top <= virtual_space()->high(), "from top");
  assert(to_top <= virtual_space()->high(), "to top");

  virtual_space()->verify();
}
#endif

void PSYoungGen::resize(size_t eden_size, size_t survivor_size) {
  // Resize the generation if needed. If the generation resize
  // reports false, do not attempt to resize the spaces.
  if (resize_generation(eden_size, survivor_size)) {
    // Then we lay out the spaces inside the generation
    resize_spaces(eden_size, survivor_size);

    space_invariants();

    log_trace(gc, ergo)("Young generation size: "
                        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
                        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
                        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                        eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(),
                        _max_gen_size, min_gen_size());
  }
}


bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
  const size_t alignment = virtual_space()->alignment();
  size_t orig_size = virtual_space()->committed_size();
  bool size_changed = false;

  // There used to be a guarantee here:
  // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
  // The code below enforces this requirement.  In addition, the desired eden
  // and survivor sizes are goals and may exceed the total generation size.

  assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");

  // Adjust new generation size
  const size_t eden_plus_survivors =
          align_up(eden_size + 2 * survivor_size, alignment);
  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
                             min_gen_size());
  assert(desired_size <= max_size(), "just checking");
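  // For example, a request for a 20M eden and 2M survivors with a 512K
  // alignment (illustrative values) gives eden_plus_survivors == 24M; the
  // code below then grows or shrinks the generation toward that goal,
  // clamped to [min_gen_size(), max_size()].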

  if (desired_size > orig_size) {
    // Grow the generation
    size_t change = desired_size - orig_size;
    assert(change % alignment == 0, "just checking");
    HeapWord* prev_high = (HeapWord*) virtual_space()->high();
    if (!virtual_space()->expand_by(change)) {
      return false; // Error if we fail to resize!
    }
    if (ZapUnusedHeapArea) {
      // Mangle newly committed space immediately because it
      // can be done here more simply than after the new
      // spaces have been computed.
      HeapWord* new_high = (HeapWord*) virtual_space()->high();
      MemRegion mangle_region(prev_high, new_high);
      SpaceMangler::mangle_region(mangle_region);
    }
    size_changed = true;
  } else if (desired_size < orig_size) {
    size_t desired_change = orig_size - desired_size;
    assert(desired_change % alignment == 0, "just checking");

    desired_change = limit_gen_shrink(desired_change);

    if (desired_change > 0) {
      virtual_space()->shrink_by(desired_change);
      reset_survivors_after_shrink();

      size_changed = true;
    }
  } else {
    if (orig_size == gen_size_limit()) {
      log_trace(gc)("PSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K);
    } else if (orig_size == min_gen_size()) {
      log_trace(gc)("PSYoung generation size at minimum: " SIZE_FORMAT "K", orig_size/K);
    }
  }

  if (size_changed) {
    post_resize();
    log_trace(gc)("PSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
                  orig_size/K, virtual_space()->committed_size()/K);
  }

  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
            virtual_space()->committed_size() == max_size(), "Sanity");

  return true;
}

#ifndef PRODUCT
// In the NUMA case eden is not mangled, so a survivor space
// moving into a region previously occupied by a survivor
// may find an unmangled region.  Also, in the PS case eden,
// to-space and from-space may not touch (i.e., there may be
// gaps between them due to movement while resizing the
// spaces).  Those gaps must be mangled.
void PSYoungGen::mangle_survivors(MutableSpace* s1,
                                  MemRegion s1MR,
                                  MutableSpace* s2,
                                  MemRegion s2MR) {
  // Check eden and gap between eden and from-space, in deciding
  // what to mangle in from-space.  Check the gap between from-space
  // and to-space when deciding what to mangle.
  //
  //      +--------+   +----+    +---+
  //      | eden   |   |s1  |    |s2 |
  //      +--------+   +----+    +---+
  //                 +-------+ +-----+
  //                 |s1MR   | |s2MR |
  //                 +-------+ +-----+
  // All of survivor-space is properly mangled so find the
  // upper bound on the mangling for any portion above current s1.
  HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
  MemRegion delta1_left;
  if (s1MR.start() < delta_end) {
    delta1_left = MemRegion(s1MR.start(), delta_end);
    s1->mangle_region(delta1_left);
  }
  // Find any portion to the right of the current s1.
  HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
  MemRegion delta1_right;
  if (delta_start < s1MR.end()) {
    delta1_right = MemRegion(delta_start, s1MR.end());
    s1->mangle_region(delta1_right);
  }

  // Similarly for the second survivor space except that
  // any of the new region that overlaps with the current
  // region of the first survivor space has already been
  // mangled.
  delta_end = MIN2(s2->bottom(), s2MR.end());
  delta_start = MAX2(s2MR.start(), s1->end());
  MemRegion delta2_left;
  if (s2MR.start() < delta_end) {
    delta2_left = MemRegion(s2MR.start(), delta_end);
    s2->mangle_region(delta2_left);
  }
  // Find any portion to the right of the current s2.
  delta_start = MAX2(s2->end(), s2MR.start());
  MemRegion delta2_right;
  if (delta_start < s2MR.end()) {
    delta2_right = MemRegion(delta_start, s2MR.end());
    s2->mangle_region(delta2_right);
  }

  // s1
  log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
    "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
    p2i(s1->bottom()), p2i(s1->end()),
    p2i(s1MR.start()), p2i(s1MR.end()));
  log_develop_trace(gc)("    Mangle before: [" PTR_FORMAT ", "
    PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
    p2i(delta1_left.start()), p2i(delta1_left.end()),
    p2i(delta1_right.start()), p2i(delta1_right.end()));

  // s2
  log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
    "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
    p2i(s2->bottom()), p2i(s2->end()),
    p2i(s2MR.start()), p2i(s2MR.end()));
  log_develop_trace(gc)("    Mangle before: [" PTR_FORMAT ", "
    PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
    p2i(delta2_left.start()), p2i(delta2_left.end()),
    p2i(delta2_right.start()), p2i(delta2_right.end()));
}
#endif // NOT PRODUCT

void PSYoungGen::resize_spaces(size_t requested_eden_size,
                               size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0  && requested_survivor_size > 0,
         "just checking");

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: " SIZE_FORMAT ", requested_survivor_size: " SIZE_FORMAT ")",
                      requested_eden_size, requested_survivor_size);
  log_trace(gc, ergo)("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(eden_space()->bottom()),
                      p2i(eden_space()->end()),
                      pointer_delta(eden_space()->end(),
                                    eden_space()->bottom(),
                                    sizeof(char)));
  log_trace(gc, ergo)("    from: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(from_space()->bottom()),
                      p2i(from_space()->end()),
                      pointer_delta(from_space()->end(),
                                    from_space()->bottom(),
                                    sizeof(char)));
  log_trace(gc, ergo)("      to: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT,
                      p2i(to_space()->bottom()),
                      p2i(to_space()->end()),
                      pointer_delta(  to_space()->end(),
                                      to_space()->bottom(),
                                      sizeof(char)));

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    log_trace(gc, ergo)("    capacities are the right sizes, returning");
    return;
  }

  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t alignment = heap->space_alignment();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
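  // If the requested sizes cannot even cover the minimum generation size,
  // the layout code below lets eden grow past requested_eden_size so that
  // the minimum is still maintained.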

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to
    eden_from_to_order = true;
    log_trace(gc, ergo)("  Eden, from, to:");

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32-bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large to represent in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
    log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta(  to_end,   to_start, sizeof(char)));
  } else {
    // Eden, to, from
    log_trace(gc, ergo)("  Eden, to, from:");

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end   = (char*)pointer_delta(virtual_space()->high(),
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    to_end   = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See  comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Could choose to not let eden shrink
    // to_start = MAX2(to_start, eden_end);

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    to_start = MAX2(to_start, eden_end);

    log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta(  to_end,   to_start, sizeof(char)));
    log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
  }


  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned(eden_start), "checking alignment");
  assert(is_object_aligned(from_start), "checking alignment");
  assert(is_object_aligned(to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from_space()->top();

  // For logging block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to   = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a NUMA space is not mangled,
    // so as not to prematurely bind its addresses to the wrong
    // memory (i.e., we don't want the GC thread to be the first
    // to touch the memory).  The survivor spaces are not NUMA
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
      to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
    to_space()->initialize(toMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  assert(from_space()->top() == old_from_top, "from top changed!");

  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: collection: %d (" SIZE_FORMAT ", " SIZE_FORMAT ") -> (" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      old_from, old_to,
                      from_space()->capacity_in_bytes(),
                      to_space()->capacity_in_bytes());
}

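// Exchange the roles of the survivor spaces: the old to-space becomes the
// new from-space (and vice versa), and the mark-sweep decorators are
// swapped to match.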
void PSYoungGen::swap_spaces() {
  MutableSpace* s    = from_space();
  _from_space        = to_space();
  _to_space          = s;

  // Now update the decorators.
  PSMarkSweepDecorator* md = from_mark_sweep();
  _from_mark_sweep           = to_mark_sweep();
  _to_mark_sweep             = md;

  assert(from_mark_sweep()->space() == from_space(), "Sanity");
  assert(to_mark_sweep()->space() == to_space(), "Sanity");
}

size_t PSYoungGen::capacity_in_bytes() const {
  return eden_space()->capacity_in_bytes()
       + from_space()->capacity_in_bytes();  // to_space() is only used during scavenge
}


size_t PSYoungGen::used_in_bytes() const {
  return eden_space()->used_in_bytes()
       + from_space()->used_in_bytes();      // to_space() is only used during scavenge
}


size_t PSYoungGen::free_in_bytes() const {
  return eden_space()->free_in_bytes()
       + from_space()->free_in_bytes();      // to_space() is only used during scavenge
}

size_t PSYoungGen::capacity_in_words() const {
  return eden_space()->capacity_in_words()
       + from_space()->capacity_in_words();  // to_space() is only used during scavenge
}


size_t PSYoungGen::used_in_words() const {
  return eden_space()->used_in_words()
       + from_space()->used_in_words();      // to_space() is only used during scavenge
}


size_t PSYoungGen::free_in_words() const {
  return eden_space()->free_in_words()
       + from_space()->free_in_words();      // to_space() is only used during scavenge
}

void PSYoungGen::object_iterate(ObjectClosure* blk) {
  eden_space()->object_iterate(blk);
  from_space()->object_iterate(blk);
  to_space()->object_iterate(blk);
}

#if INCLUDE_SERIALGC

void PSYoungGen::precompact() {
  eden_mark_sweep()->precompact();
  from_mark_sweep()->precompact();
  to_mark_sweep()->precompact();
}

void PSYoungGen::adjust_pointers() {
  eden_mark_sweep()->adjust_pointers();
  from_mark_sweep()->adjust_pointers();
  to_mark_sweep()->adjust_pointers();
}

void PSYoungGen::compact() {
  eden_mark_sweep()->compact(ZapUnusedHeapArea);
  from_mark_sweep()->compact(ZapUnusedHeapArea);
  // Mark sweep stores preserved markOops in to space, don't disturb!
  to_mark_sweep()->compact(false);
}

#endif // INCLUDE_SERIALGC

void PSYoungGen::print() const { print_on(tty); }
void PSYoungGen::print_on(outputStream* st) const {
  st->print(" %-15s", "PSYoungGen");
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity_in_bytes()/K, used_in_bytes()/K);
  virtual_space()->print_space_boundaries_on(st);
  st->print("  eden"); eden_space()->print_on(st);
  st->print("  from"); from_space()->print_on(st);
  st->print("  to  "); to_space()->print_on(st);
}

// Note that a space is not printed before the [NAME:
void PSYoungGen::print_used_change(size_t prev_used) const {
  log_info(gc, heap)("%s: "  SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
      name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K);
}

size_t PSYoungGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSYoungGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

size_t PSYoungGen::available_to_min_gen() {
  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
  return virtual_space()->committed_size() - min_gen_size();
}

// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const size_t space_alignment = heap->space_alignment();
  const size_t gen_alignment = heap->generation_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
}

// Return the number of bytes available for resizing down the young
// generation.  This is the minimum of
//      input "bytes"
//      bytes to the minimum young gen size
//      bytes to the size currently being used + some small extra
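//
// For example, with bytes == 8M, available_to_min_gen() == 6M and
// available_to_live() == 10M (illustrative values), the result is 6M
// rounded down to the virtual space alignment.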
size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
  // Allow shrinkage into the current eden but keep eden large enough
  // to maintain the minimum young gen size
  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
  return align_down(bytes, virtual_space()->alignment());
}

void PSYoungGen::reset_after_change() {
  ShouldNotReachHere();
}

void PSYoungGen::reset_survivors_after_shrink() {
  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::set_subject_to_discovery_span(_reserved);

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  HeapWord* new_end = (HeapWord*)virtual_space()->high();
  assert(new_end >= space_shrinking->bottom(), "Shrink was too large");
  // Was there a shrink of the survivor space?
  if (new_end < space_shrinking->end()) {
    MemRegion mr(space_shrinking->bottom(), new_end);
    space_shrinking->initialize(mr,
                                SpaceDecorator::DontClear,
                                SpaceDecorator::Mangle);
  }
}

// This method currently does not expect to expand into eden (i.e.,
// the virtual space boundaries are expected to be consistent
// with the eden boundaries).
void PSYoungGen::post_resize() {
  assert_locked_or_safepoint(Heap_lock);
  assert((eden_space()->bottom() < to_space()->bottom()) &&
         (eden_space()->bottom() < from_space()->bottom()),
         "Eden is assumed to be below the survivor spaces");

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);
  space_invariants();
}



void PSYoungGen::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void PSYoungGen::verify() {
  eden_space()->verify();
  from_space()->verify();
  to_space()->verify();
}

#ifndef PRODUCT
void PSYoungGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden_space()->set_top_for_allocations();
  from_space()->set_top_for_allocations();
  to_space()->set_top_for_allocations();
}
#endif