< prev index next >

src/share/vm/gc/parallel/psYoungGen.cpp

Print this page




  99                                   " of the young generation");
 100   }
 101 
 102   // Generation Counters - generation 0, 3 subspaces
 103   _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
 104                                            _max_gen_size, _virtual_space);
 105 
 106   // Compute maximum space sizes for performance counters
 107   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 108   size_t alignment = heap->space_alignment();
 109   size_t size = virtual_space()->reserved_size();
 110 
 111   size_t max_survivor_size;
 112   size_t max_eden_size;
 113 
 114   if (UseAdaptiveSizePolicy) {
 115     max_survivor_size = size / MinSurvivorRatio;
 116 
 117     // round the survivor space size down to the nearest alignment
 118     // and make sure its size is greater than 0.
 119     max_survivor_size = align_size_down(max_survivor_size, alignment);
 120     max_survivor_size = MAX2(max_survivor_size, alignment);
 121 
 122     // set the maximum size of eden to be the size of the young gen
 123     // less two times the minimum survivor size. The minimum survivor
 124     // size for UseAdaptiveSizePolicy is one alignment.
 125     max_eden_size = size - 2 * alignment;
 126   } else {
 127     max_survivor_size = size / InitialSurvivorRatio;
 128 
 129     // round the survivor space size down to the nearest alignment
 130     // and make sure its size is greater than 0.
 131     max_survivor_size = align_size_down(max_survivor_size, alignment);
 132     max_survivor_size = MAX2(max_survivor_size, alignment);
 133 
 134     // set the maximum size of eden to be the size of the young gen
 135     // less two times the survivor size when the generation is 100%
 136     // committed. The minimum survivor size for -UseAdaptiveSizePolicy
 137     // is dependent on the committed portion (current capacity) of the
 138     // generation - the less space committed, the smaller the survivor
 139     // space, possibly as small as an alignment. However, we are interested
 140     // in the case where the young generation is 100% committed, as this
 141     // is the point where eden reaches its maximum size. At this point,
 142     // the size of a survivor space is max_survivor_size.
 143     max_eden_size = size - 2 * max_survivor_size;
 144   }
 145 
 146   _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
 147                                      _gen_counters);
 148   _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
 149                                      _gen_counters);
 150   _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
 151                                    _gen_counters);
 152 
 153   compute_initial_space_boundaries();
 154 }
 155 
 156 void PSYoungGen::compute_initial_space_boundaries() {
       // Carve the committed part of the young generation into eden plus two
       // equal survivor spaces: each survivor is committed_size / InitialSurvivorRatio
       // (aligned down, but at least one alignment unit), and eden receives the
       // remainder.
 157   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 158 
 159   // Compute sizes
 160   size_t alignment = heap->space_alignment();
 161   size_t size = virtual_space()->committed_size();
 162   assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
 163 
 164   size_t survivor_size = size / InitialSurvivorRatio;
 165   survivor_size = align_size_down(survivor_size, alignment);
 166   // ... but never less than an alignment
 167   survivor_size = MAX2(survivor_size, alignment);
 168 
 169   // Young generation is eden + 2 survivor spaces
 170   size_t eden_size = size - (2 * survivor_size);
 171 
 172   // Now go ahead and set 'em.
 173   set_space_boundaries(eden_size, survivor_size);
 174   space_invariants();
 175 
       // Push the freshly computed capacities out to the performance counters.
 176   if (UsePerfData) {
 177     _eden_counters->update_capacity();
 178     _from_counters->update_capacity();
 179     _to_counters->update_capacity();
 180   }
 181 }
 182 
 183 void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
       // Lay out the committed region as [eden | to | from] and (re)initialize
       // the three spaces over it. Both survivor spaces get the same
       // survivor_size; eden_size + 2 * survivor_size must exactly fill the
       // committed region (checked by the from_end assert below).
 184   assert(eden_size < virtual_space()->committed_size(), "just checking");
 185   assert(eden_size > 0  && survivor_size > 0, "just checking");
 186 
 187   // Initial layout is Eden, to, from. After swapping survivor spaces,
 188   // that leaves us with Eden, from, to, which is step one in our two
 189   // step resize-with-live-data procedure.
 190   char *eden_start = virtual_space()->low();
 191   char *to_start   = eden_start + eden_size;
 192   char *from_start = to_start   + survivor_size;
 193   char *from_end   = from_start + survivor_size;
 194 
 195   assert(from_end == virtual_space()->high(), "just checking");
 196   assert(is_ptr_object_aligned(eden_start), "checking alignment");
 197   assert(is_ptr_object_aligned(to_start),   "checking alignment");
 198   assert(is_ptr_object_aligned(from_start), "checking alignment");
 199 
 200   MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
 201   MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
 202   MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
 203 
 204   eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
 205     to_space()->initialize(to_mr  , true, ZapUnusedHeapArea);
 206   from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
 207 }
 208 
 209 #ifndef PRODUCT
 210 void PSYoungGen::space_invariants() {
 211   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 212   const size_t alignment = heap->space_alignment();
 213 
 214   // Currently, our eden size cannot shrink to zero
 215   guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
 216   guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
 217   guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");
 218 


 277                         _max_gen_size, min_gen_size());
 278   }
 279 }
 280 
 281 
 282 bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
 283   const size_t alignment = virtual_space()->alignment();
 284   size_t orig_size = virtual_space()->committed_size();
 285   bool size_changed = false;
 286 
 287   // There used to be this guarantee there.
 288   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
 289   // Code below forces this requirement.  In addition the desired eden
 290   // size and desired survivor sizes are desired goals and may
 291   // exceed the total generation size.
 292 
 293   assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");
 294 
 295   // Adjust new generation size
 296   const size_t eden_plus_survivors =
 297           align_size_up(eden_size + 2 * survivor_size, alignment);
 298   size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
 299                              min_gen_size());
 300   assert(desired_size <= max_size(), "just checking");
 301 
 302   if (desired_size > orig_size) {
 303     // Grow the generation
 304     size_t change = desired_size - orig_size;
 305     assert(change % alignment == 0, "just checking");
 306     HeapWord* prev_high = (HeapWord*) virtual_space()->high();
 307     if (!virtual_space()->expand_by(change)) {
 308       return false; // Error if we fail to resize!
 309     }
 310     if (ZapUnusedHeapArea) {
 311       // Mangle newly committed space immediately because it
 312       // can be done here more simply that after the new
 313       // spaces have been computed.
 314       HeapWord* new_high = (HeapWord*) virtual_space()->high();
 315       MemRegion mangle_region(prev_high, new_high);
 316       SpaceMangler::mangle_region(mangle_region);
 317     }


 511     assert(eden_end >= eden_start, "addition overflowed");
 512 
 513     // To may resize into from space as long as it is clear of live data.
 514     // From space must remain page aligned, though, so we need to do some
 515     // extra calculations.
 516 
 517     // First calculate an optimal to-space
 518     to_end   = (char*)virtual_space()->high();
 519     to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
 520                                     sizeof(char));
 521 
 522     // Does the optimal to-space overlap from-space?
 523     if (to_start < (char*)from_space()->end()) {
 524       // Calculate the minimum offset possible for from_end
 525       size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
 526 
 527       // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
 528       if (from_size == 0) {
 529         from_size = alignment;
 530       } else {
 531         from_size = align_size_up(from_size, alignment);
 532       }
 533 
 534       from_end = from_start + from_size;
 535       assert(from_end > from_start, "addition overflow or from_size problem");
 536 
 537       guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");
 538 
 539       // Now update to_start with the new from_end
 540       to_start = MAX2(from_end, to_start);
 541     }
 542 
 543     guarantee(to_start != to_end, "to space is zero sized");
 544 
 545     log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 546                         p2i(eden_start),
 547                         p2i(eden_end),
 548                         pointer_delta(eden_end, eden_start, sizeof(char)));
 549     log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 550                         p2i(from_start),
 551                         p2i(from_end),


 594 
 595     log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 596                         p2i(eden_start),
 597                         p2i(eden_end),
 598                         pointer_delta(eden_end, eden_start, sizeof(char)));
 599     log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 600                         p2i(to_start),
 601                         p2i(to_end),
 602                         pointer_delta(  to_end,   to_start, sizeof(char)));
 603     log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 604                         p2i(from_start),
 605                         p2i(from_end),
 606                         pointer_delta(from_end, from_start, sizeof(char)));
 607   }
 608 
 609 
 610   guarantee((HeapWord*)from_start <= from_space()->bottom(),
 611             "from start moved to the right");
 612   guarantee((HeapWord*)from_end >= from_space()->top(),
 613             "from end moved into live data");
 614   assert(is_ptr_object_aligned(eden_start), "checking alignment");
 615   assert(is_ptr_object_aligned(from_start), "checking alignment");
 616   assert(is_ptr_object_aligned(to_start), "checking alignment");
 617 
 618   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
 619   MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
 620   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
 621 
 622   // Let's make sure the call to initialize doesn't reset "top"!
 623   HeapWord* old_from_top = from_space()->top();
 624 
 625   // For logging block  below
 626   size_t old_from = from_space()->capacity_in_bytes();
 627   size_t old_to   = to_space()->capacity_in_bytes();
 628 
 629   if (ZapUnusedHeapArea) {
 630     // NUMA is a special case because a numa space is not mangled
 631     // in order to not prematurely bind its address to memory to
 632     // the wrong memory (i.e., don't want the GC thread to first
 633     // touch the memory).  The survivor spaces are not numa
 634     // spaces and are mangled.
 635     if (UseNUMA) {
 636       if (eden_from_to_order) {


 798 
 799   // Include any space that is committed but not included in
 800   // the survivor spaces.
 801   assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
 802     "Survivor space beyond high end");
 803   size_t unused_committed = pointer_delta(virtual_space()->high(),
 804     space_shrinking->end(), sizeof(char));
 805 
 806   if (space_shrinking->is_empty()) {
 807     // Don't let the space shrink to 0
 808     assert(space_shrinking->capacity_in_bytes() >= space_alignment,
 809       "Space is too small");
 810     delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
 811   } else {
 812     delta_in_survivor = pointer_delta(space_shrinking->end(),
 813                                       space_shrinking->top(),
 814                                       sizeof(char));
 815   }
 816 
 817   size_t delta_in_bytes = unused_committed + delta_in_survivor;
 818   delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
 819   return delta_in_bytes;
 820 }
 821 
 822 // Return the number of bytes available for resizing down the young
 823 // generation.  This is the minimum of
 824 //      input "bytes"
 825 //      bytes to the minimum young gen size
 826 //      bytes to the size currently being used + some small extra
 827 size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
       // Clamp the requested shrink so we never go below the minimum young gen
       // size or cut into live data, then round down to the virtual space
       // alignment so the resulting commit change is a legal granule.
 828   // Allow shrinkage into the current eden but keep eden large enough
 829   // to maintain the minimum young gen size
 830   bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
 831   return align_size_down(bytes, virtual_space()->alignment());
 832 }
 833 
 834 void PSYoungGen::reset_after_change() {
       // This path must never be taken for PSYoungGen; fail fatally if it is.
 835   ShouldNotReachHere();
 836 }
 837 
 838 void PSYoungGen::reset_survivors_after_shrink() {
 839   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
 840                         (HeapWord*)virtual_space()->high_boundary());
 841   PSScavenge::reference_processor()->set_span(_reserved);
 842 
 843   MutableSpace* space_shrinking = NULL;
 844   if (from_space()->end() > to_space()->end()) {
 845     space_shrinking = from_space();
 846   } else {
 847     space_shrinking = to_space();
 848   }
 849 
 850   HeapWord* new_end = (HeapWord*)virtual_space()->high();
 851   assert(new_end >= space_shrinking->bottom(), "Shrink was too large");




  99                                   " of the young generation");
 100   }
 101 
 102   // Generation Counters - generation 0, 3 subspaces
 103   _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
 104                                            _max_gen_size, _virtual_space);
 105 
 106   // Compute maximum space sizes for performance counters
 107   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 108   size_t alignment = heap->space_alignment();
 109   size_t size = virtual_space()->reserved_size();
 110 
 111   size_t max_survivor_size;
 112   size_t max_eden_size;
 113 
 114   if (UseAdaptiveSizePolicy) {
 115     max_survivor_size = size / MinSurvivorRatio;
 116 
 117     // round the survivor space size down to the nearest alignment
 118     // and make sure its size is greater than 0.
 119     max_survivor_size = align_down(max_survivor_size, alignment);
 120     max_survivor_size = MAX2(max_survivor_size, alignment);
 121 
 122     // set the maximum size of eden to be the size of the young gen
 123     // less two times the minimum survivor size. The minimum survivor
 124     // size for UseAdaptiveSizePolicy is one alignment.
 125     max_eden_size = size - 2 * alignment;
 126   } else {
 127     max_survivor_size = size / InitialSurvivorRatio;
 128 
 129     // round the survivor space size down to the nearest alignment
 130     // and make sure its size is greater than 0.
 131     max_survivor_size = align_down(max_survivor_size, alignment);
 132     max_survivor_size = MAX2(max_survivor_size, alignment);
 133 
 134     // set the maximum size of eden to be the size of the young gen
 135     // less two times the survivor size when the generation is 100%
 136     // committed. The minimum survivor size for -UseAdaptiveSizePolicy
 137     // is dependent on the committed portion (current capacity) of the
 138     // generation - the less space committed, the smaller the survivor
 139     // space, possibly as small as an alignment. However, we are interested
 140     // in the case where the young generation is 100% committed, as this
 141     // is the point where eden reaches its maximum size. At this point,
 142     // the size of a survivor space is max_survivor_size.
 143     max_eden_size = size - 2 * max_survivor_size;
 144   }
 145 
 146   _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
 147                                      _gen_counters);
 148   _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
 149                                      _gen_counters);
 150   _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
 151                                    _gen_counters);
 152 
 153   compute_initial_space_boundaries();
 154 }
 155 
 156 void PSYoungGen::compute_initial_space_boundaries() {
       // Split the committed young gen into eden and two equal survivor
       // spaces. Each survivor is committed_size / InitialSurvivorRatio,
       // aligned down but never smaller than one alignment unit; eden takes
       // whatever is left.
 157   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 158 
 159   // Compute sizes
 160   size_t alignment = heap->space_alignment();
 161   size_t size = virtual_space()->committed_size();
 162   assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");
 163 
 164   size_t survivor_size = size / InitialSurvivorRatio;
 165   survivor_size = align_down(survivor_size, alignment);
 166   // ... but never less than an alignment
 167   survivor_size = MAX2(survivor_size, alignment);
 168 
 169   // Young generation is eden + 2 survivor spaces
 170   size_t eden_size = size - (2 * survivor_size);
 171 
 172   // Now go ahead and set 'em.
 173   set_space_boundaries(eden_size, survivor_size);
 174   space_invariants();
 175 
       // Reflect the new capacities in the performance counters.
 176   if (UsePerfData) {
 177     _eden_counters->update_capacity();
 178     _from_counters->update_capacity();
 179     _to_counters->update_capacity();
 180   }
 181 }
 182 
 183 void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
       // Establish the [eden | to | from] layout over the committed region and
       // (re)initialize all three spaces. survivor_size is applied to both
       // survivor spaces, so eden_size + 2 * survivor_size must exactly cover
       // the committed region (asserted via from_end below).
 184   assert(eden_size < virtual_space()->committed_size(), "just checking");
 185   assert(eden_size > 0  && survivor_size > 0, "just checking");
 186 
 187   // Initial layout is Eden, to, from. After swapping survivor spaces,
 188   // that leaves us with Eden, from, to, which is step one in our two
 189   // step resize-with-live-data procedure.
 190   char *eden_start = virtual_space()->low();
 191   char *to_start   = eden_start + eden_size;
 192   char *from_start = to_start   + survivor_size;
 193   char *from_end   = from_start + survivor_size;
 194 
 195   assert(from_end == virtual_space()->high(), "just checking");
 196   assert(is_object_aligned(eden_start), "checking alignment");
 197   assert(is_object_aligned(to_start),   "checking alignment");
 198   assert(is_object_aligned(from_start), "checking alignment");
 199 
 200   MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
 201   MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
 202   MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
 203 
 204   eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
 205     to_space()->initialize(to_mr  , true, ZapUnusedHeapArea);
 206   from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
 207 }
 208 
 209 #ifndef PRODUCT
 210 void PSYoungGen::space_invariants() {
 211   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 212   const size_t alignment = heap->space_alignment();
 213 
 214   // Currently, our eden size cannot shrink to zero
 215   guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
 216   guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
 217   guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");
 218 


 277                         _max_gen_size, min_gen_size());
 278   }
 279 }
 280 
 281 
 282 bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
 283   const size_t alignment = virtual_space()->alignment();
 284   size_t orig_size = virtual_space()->committed_size();
 285   bool size_changed = false;
 286 
 287   // There used to be this guarantee there.
 288   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
 289   // Code below forces this requirement.  In addition the desired eden
 290   // size and desired survivor sizes are desired goals and may
 291   // exceed the total generation size.
 292 
 293   assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");
 294 
 295   // Adjust new generation size
 296   const size_t eden_plus_survivors =
 297           align_up(eden_size + 2 * survivor_size, alignment);
 298   size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
 299                              min_gen_size());
 300   assert(desired_size <= max_size(), "just checking");
 301 
 302   if (desired_size > orig_size) {
 303     // Grow the generation
 304     size_t change = desired_size - orig_size;
 305     assert(change % alignment == 0, "just checking");
 306     HeapWord* prev_high = (HeapWord*) virtual_space()->high();
 307     if (!virtual_space()->expand_by(change)) {
 308       return false; // Error if we fail to resize!
 309     }
 310     if (ZapUnusedHeapArea) {
 311       // Mangle newly committed space immediately because it
 312       // can be done here more simply that after the new
 313       // spaces have been computed.
 314       HeapWord* new_high = (HeapWord*) virtual_space()->high();
 315       MemRegion mangle_region(prev_high, new_high);
 316       SpaceMangler::mangle_region(mangle_region);
 317     }


 511     assert(eden_end >= eden_start, "addition overflowed");
 512 
 513     // To may resize into from space as long as it is clear of live data.
 514     // From space must remain page aligned, though, so we need to do some
 515     // extra calculations.
 516 
 517     // First calculate an optimal to-space
 518     to_end   = (char*)virtual_space()->high();
 519     to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
 520                                     sizeof(char));
 521 
 522     // Does the optimal to-space overlap from-space?
 523     if (to_start < (char*)from_space()->end()) {
 524       // Calculate the minimum offset possible for from_end
 525       size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
 526 
 527       // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
 528       if (from_size == 0) {
 529         from_size = alignment;
 530       } else {
 531         from_size = align_up(from_size, alignment);
 532       }
 533 
 534       from_end = from_start + from_size;
 535       assert(from_end > from_start, "addition overflow or from_size problem");
 536 
 537       guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");
 538 
 539       // Now update to_start with the new from_end
 540       to_start = MAX2(from_end, to_start);
 541     }
 542 
 543     guarantee(to_start != to_end, "to space is zero sized");
 544 
 545     log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 546                         p2i(eden_start),
 547                         p2i(eden_end),
 548                         pointer_delta(eden_end, eden_start, sizeof(char)));
 549     log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 550                         p2i(from_start),
 551                         p2i(from_end),


 594 
 595     log_trace(gc, ergo)("    [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 596                         p2i(eden_start),
 597                         p2i(eden_end),
 598                         pointer_delta(eden_end, eden_start, sizeof(char)));
 599     log_trace(gc, ergo)("    [  to_start ..   to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 600                         p2i(to_start),
 601                         p2i(to_end),
 602                         pointer_delta(  to_end,   to_start, sizeof(char)));
 603     log_trace(gc, ergo)("    [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 604                         p2i(from_start),
 605                         p2i(from_end),
 606                         pointer_delta(from_end, from_start, sizeof(char)));
 607   }
 608 
 609 
 610   guarantee((HeapWord*)from_start <= from_space()->bottom(),
 611             "from start moved to the right");
 612   guarantee((HeapWord*)from_end >= from_space()->top(),
 613             "from end moved into live data");
 614   assert(is_object_aligned(eden_start), "checking alignment");
 615   assert(is_object_aligned(from_start), "checking alignment");
 616   assert(is_object_aligned(to_start), "checking alignment");
 617 
 618   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
 619   MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
 620   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
 621 
 622   // Let's make sure the call to initialize doesn't reset "top"!
 623   HeapWord* old_from_top = from_space()->top();
 624 
 625   // For logging block  below
 626   size_t old_from = from_space()->capacity_in_bytes();
 627   size_t old_to   = to_space()->capacity_in_bytes();
 628 
 629   if (ZapUnusedHeapArea) {
 630     // NUMA is a special case because a numa space is not mangled
 631     // in order to not prematurely bind its address to memory to
 632     // the wrong memory (i.e., don't want the GC thread to first
 633     // touch the memory).  The survivor spaces are not numa
 634     // spaces and are mangled.
 635     if (UseNUMA) {
 636       if (eden_from_to_order) {


 798 
 799   // Include any space that is committed but not included in
 800   // the survivor spaces.
 801   assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
 802     "Survivor space beyond high end");
 803   size_t unused_committed = pointer_delta(virtual_space()->high(),
 804     space_shrinking->end(), sizeof(char));
 805 
 806   if (space_shrinking->is_empty()) {
 807     // Don't let the space shrink to 0
 808     assert(space_shrinking->capacity_in_bytes() >= space_alignment,
 809       "Space is too small");
 810     delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
 811   } else {
 812     delta_in_survivor = pointer_delta(space_shrinking->end(),
 813                                       space_shrinking->top(),
 814                                       sizeof(char));
 815   }
 816 
 817   size_t delta_in_bytes = unused_committed + delta_in_survivor;
 818   delta_in_bytes = align_down(delta_in_bytes, gen_alignment);
 819   return delta_in_bytes;
 820 }
 821 
 822 // Return the number of bytes available for resizing down the young
 823 // generation.  This is the minimum of
 824 //      input "bytes"
 825 //      bytes to the minimum young gen size
 826 //      bytes to the size currently being used + some small extra
 827 size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
       // Bound the shrink request by the minimum gen size and by live data,
       // then align down so the commit change stays a legal granule.
 828   // Allow shrinkage into the current eden but keep eden large enough
 829   // to maintain the minimum young gen size
 830   bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
 831   return align_down(bytes, virtual_space()->alignment());
 832 }
 833 
 834 void PSYoungGen::reset_after_change() {
       // Must never be called on PSYoungGen; trips a fatal guard if reached.
 835   ShouldNotReachHere();
 836 }
 837 
 838 void PSYoungGen::reset_survivors_after_shrink() {
 839   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
 840                         (HeapWord*)virtual_space()->high_boundary());
 841   PSScavenge::reference_processor()->set_span(_reserved);
 842 
 843   MutableSpace* space_shrinking = NULL;
 844   if (from_space()->end() > to_space()->end()) {
 845     space_shrinking = from_space();
 846   } else {
 847     space_shrinking = to_space();
 848   }
 849 
 850   HeapWord* new_end = (HeapWord*)virtual_space()->high();
 851   assert(new_end >= space_shrinking->bottom(), "Shrink was too large");


< prev index next >