
src/share/vm/gc/parallel/asPSYoungGen.cpp


--- old version ---

  58                                             size_t alignment) {
  59   assert(_init_gen_size != 0, "Should have a finite size");
  60   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  61   if (!_virtual_space->expand_by(_init_gen_size)) {
  62     vm_exit_during_initialization("Could not reserve enough space for "
  63                                   "object heap");
  64   }
  65 }
  66 
  67 void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  68   initialize_virtual_space(rs, alignment);
  69   initialize_work();
  70 }
  71 
  72 size_t ASPSYoungGen::available_for_expansion() {
  73   size_t current_committed_size = virtual_space()->committed_size();
  74   assert((gen_size_limit() >= current_committed_size),
  75     "generation size limit is wrong");
  76   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  77   size_t result =  gen_size_limit() - current_committed_size;
  78   size_t result_aligned = align_size_down(result, heap->generation_alignment());
  79   return result_aligned;
  80 }
  81 
  82 // Return the number of bytes the young gen is willing to give up.
  83 //
  84 // Future implementations could check the survivors and if to_space is in the
  85 // right place (below from_space), take a chunk from to_space.
  86 size_t ASPSYoungGen::available_for_contraction() {
  87   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  88   if (uncommitted_bytes != 0) {
  89     return uncommitted_bytes;
  90   }
  91 
  92   if (eden_space()->is_empty()) {
  93     // Respect the minimum size for eden and for the young gen as a whole.
  94     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  95     const size_t eden_alignment = heap->space_alignment();
  96     const size_t gen_alignment = heap->generation_alignment();
  97 
  98     assert(eden_space()->capacity_in_bytes() >= eden_alignment,
  99       "Alignment is wrong");
 100     size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
 101     eden_avail = align_size_down(eden_avail, gen_alignment);
 102 
 103     assert(virtual_space()->committed_size() >= min_gen_size(),
 104       "minimum gen size is wrong");
 105     size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
 106     assert(virtual_space()->is_aligned(gen_avail), "not aligned");
 107 
 108     const size_t max_contraction = MIN2(eden_avail, gen_avail);
 109     // See comment for ASPSOldGen::available_for_contraction()
 110     // for reasons the "increment" fraction is used.
 111     PSAdaptiveSizePolicy* policy = heap->size_policy();
 112     size_t result = policy->eden_increment_aligned_down(max_contraction);
 113     size_t result_aligned = align_size_down(result, gen_alignment);
 114 
 115     log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
 116     log_trace(gc, ergo)("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
 117     log_trace(gc, ergo)("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
 118     log_trace(gc, ergo)("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
 119 
 120     return result_aligned;
 121   }
 122 
 123   return 0;
 124 }
 125 
 126 // The current implementation only considers space up to the end of eden.
 127 // If to_space is below from_space, to_space is not considered, even
 128 // though it could be.
 129 size_t ASPSYoungGen::available_to_live() {
 130   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 131   const size_t alignment = heap->space_alignment();
 132 
 133   // Include any space that is committed but is not in eden.


 149 //  some additional diagnostics
 150 // If no additional changes are required, this can be deleted
 151 // and the changes factored back into PSYoungGen::resize_generation().
 152 bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
 153   const size_t alignment = virtual_space()->alignment();
 154   size_t orig_size = virtual_space()->committed_size();
 155   bool size_changed = false;
 156 
 157   // There used to be a guarantee here that
 158   //   (eden_size + 2*survivor_size)  <= _max_gen_size
 159   // This requirement is enforced by the calculation of desired_size
 160 // below.  It may not be true on entry, since eden_size is not
 161 // bounded by the generation size.
 162 
 163   assert(max_size() == reserved().byte_size(), "max gen size problem?");
 164   assert(min_gen_size() <= orig_size && orig_size <= max_size(),
 165          "just checking");
 166 
 167   // Adjust new generation size
 168   const size_t eden_plus_survivors =
 169     align_size_up(eden_size + 2 * survivor_size, alignment);
 170   size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
 171                              min_gen_size());
 172   assert(desired_size <= gen_size_limit(), "just checking");
 173 
 174   if (desired_size > orig_size) {
 175     // Grow the generation
 176     size_t change = desired_size - orig_size;
 177     HeapWord* prev_low = (HeapWord*) virtual_space()->low();
 178     if (!virtual_space()->expand_by(change)) {
 179       return false;
 180     }
 181     if (ZapUnusedHeapArea) {
 182       // Mangle newly committed space immediately because it
 183       // can be done here more simply than after the new
 184       // spaces have been computed.
 185       HeapWord* new_low = (HeapWord*) virtual_space()->low();
 186       assert(new_low < prev_low, "Did not grow");
 187 
 188       MemRegion mangle_region(new_low, prev_low);
 189       SpaceMangler::mangle_region(mangle_region);


 315     // To-space may resize into from-space as long as it is clear of live data.
 316     // From space must remain page aligned, though, so we need to do some
 317     // extra calculations.
 318 
 319     // First calculate an optimal to-space
 320     to_end   = (char*)virtual_space()->high();
 321     to_start = (char*)pointer_delta(to_end,
 322                                     (char*)requested_survivor_size,
 323                                     sizeof(char));
 324 
 325     // Does the optimal to-space overlap from-space?
 326     if (to_start < (char*)from_space()->end()) {
 327       // Calculate the minimum offset possible for from_end
 328       size_t from_size =
 329         pointer_delta(from_space()->top(), from_start, sizeof(char));
 330 
 331       // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
 332       if (from_size == 0) {
 333         from_size = alignment;
 334       } else {
 335         from_size = align_size_up(from_size, alignment);
 336       }
 337 
 338       from_end = from_start + from_size;
 339       assert(from_end > from_start, "addition overflow or from_size problem");
 340 
 341       guarantee(from_end <= (char*)from_space()->end(),
 342         "from_end moved to the right");
 343 
 344       // Now update to_start with the new from_end
 345       to_start = MAX2(from_end, to_start);
 346     }
 347 
 348     guarantee(to_start != to_end, "to space is zero sized");
 349 
 350     log_trace(gc, ergo)("    [eden_start .. eden_end): "
 351                         "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 352                         p2i(eden_start),
 353                         p2i(eden_end),
 354                         pointer_delta(eden_end, eden_start, sizeof(char)));
 355     log_trace(gc, ergo)("    [from_start .. from_end): "


 402                         p2i(eden_start),
 403                         p2i(eden_end),
 404                         pointer_delta(eden_end, eden_start, sizeof(char)));
 405     log_trace(gc, ergo)("    [  to_start ..   to_end): "
 406                         "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 407                         p2i(to_start),
 408                         p2i(to_end),
 409                         pointer_delta(  to_end,   to_start, sizeof(char)));
 410     log_trace(gc, ergo)("    [from_start .. from_end): "
 411                         "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 412                         p2i(from_start),
 413                         p2i(from_end),
 414                         pointer_delta(from_end, from_start, sizeof(char)));
 415   }
 416 
 417 
 418   guarantee((HeapWord*)from_start <= from_space()->bottom(),
 419             "from start moved to the right");
 420   guarantee((HeapWord*)from_end >= from_space()->top(),
 421             "from end moved into live data");
 422   assert(is_ptr_object_aligned(eden_start), "checking alignment");
 423   assert(is_ptr_object_aligned(from_start), "checking alignment");
 424   assert(is_ptr_object_aligned(to_start), "checking alignment");
 425 
 426   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
 427   MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
 428   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
 429 
 430   // Let's make sure the call to initialize doesn't reset "top"!
 431   DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)
 432 
 433   // For the logging block below
 434   size_t old_from = from_space()->capacity_in_bytes();
 435   size_t old_to   = to_space()->capacity_in_bytes();
 436 
 437   if (ZapUnusedHeapArea) {
 438     // NUMA is a special case: a NUMA space is not mangled, so that
 439     // its addresses are not prematurely bound to the wrong memory
 440     // (i.e., we don't want the GC thread to be the first to touch
 441     // the memory).  The survivor spaces are not NUMA spaces and
 442     // are mangled.
 443     if (UseNUMA) {
 444       if (eden_from_to_order) {


--- new version ---

  58                                             size_t alignment) {
  59   assert(_init_gen_size != 0, "Should have a finite size");
  60   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  61   if (!_virtual_space->expand_by(_init_gen_size)) {
  62     vm_exit_during_initialization("Could not reserve enough space for "
  63                                   "object heap");
  64   }
  65 }
  66 
  67 void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  68   initialize_virtual_space(rs, alignment);
  69   initialize_work();
  70 }
  71 
  72 size_t ASPSYoungGen::available_for_expansion() {
  73   size_t current_committed_size = virtual_space()->committed_size();
  74   assert((gen_size_limit() >= current_committed_size),
  75     "generation size limit is wrong");
  76   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  77   size_t result =  gen_size_limit() - current_committed_size;
  78   size_t result_aligned = align_down(result, heap->generation_alignment());
  79   return result_aligned;
  80 }
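
     // A quick sanity check of the arithmetic above, using illustrative
     // values (assumed for the example, not taken from this webrev):
     //
     //   gen_size_limit()              == 64*M
     //   current_committed_size        == 37*M
     //   heap->generation_alignment()  == 2*M
     //
     //   result         = 64*M - 37*M;              // 27M
     //   result_aligned = align_down(result, 2*M);  // 26M
     //
     // so the young gen reports 26M available for expansion, always a
     // multiple of the generation alignment.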
  81 
  82 // Return the number of bytes the young gen is willing to give up.
  83 //
  84 // Future implementations could check the survivors and if to_space is in the
  85 // right place (below from_space), take a chunk from to_space.
  86 size_t ASPSYoungGen::available_for_contraction() {
  87   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  88   if (uncommitted_bytes != 0) {
  89     return uncommitted_bytes;
  90   }
  91 
  92   if (eden_space()->is_empty()) {
  93     // Respect the minimum size for eden and for the young gen as a whole.
  94     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  95     const size_t eden_alignment = heap->space_alignment();
  96     const size_t gen_alignment = heap->generation_alignment();
  97 
  98     assert(eden_space()->capacity_in_bytes() >= eden_alignment,
  99       "Alignment is wrong");
 100     size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
 101     eden_avail = align_down(eden_avail, gen_alignment);
 102 
 103     assert(virtual_space()->committed_size() >= min_gen_size(),
 104       "minimum gen size is wrong");
 105     size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
 106     assert(virtual_space()->is_aligned(gen_avail), "not aligned");
 107 
 108     const size_t max_contraction = MIN2(eden_avail, gen_avail);
 109     // See comment for ASPSOldGen::available_for_contraction()
 110     // for reasons the "increment" fraction is used.
 111     PSAdaptiveSizePolicy* policy = heap->size_policy();
 112     size_t result = policy->eden_increment_aligned_down(max_contraction);
 113     size_t result_aligned = align_down(result, gen_alignment);
 114 
 115     log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
 116     log_trace(gc, ergo)("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
 117     log_trace(gc, ergo)("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
 118     log_trace(gc, ergo)("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
 119 
 120     return result_aligned;
 121   }
 122 
 123   return 0;
 124 }
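
// A worked example of the contraction math above, with assumed values
// (illustrative only): everything is committed (uncommitted_size() == 0),
// eden is empty, and
//
//   eden capacity_in_bytes() == 30*M   space_alignment()  == 512*K
//   committed_size()         == 40*M   min_gen_size()     == 8*M
//   generation_alignment()   == 2*M
//
//   eden_avail      = align_down(30*M - 512*K, 2*M);   // 28M
//   gen_avail       = 40*M - 8*M;                      // 32M
//   max_contraction = MIN2(eden_avail, gen_avail);     // 28M
//
// eden_increment_aligned_down() then takes only a fraction of that (see
// ASPSOldGen::available_for_contraction()), so the generation gives the
// 28M back gradually rather than all at once.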
 125 
 126 // The current implementation only considers space up to the end of eden.
 127 // If to_space is below from_space, to_space is not considered, even
 128 // though it could be.
 129 size_t ASPSYoungGen::available_to_live() {
 130   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 131   const size_t alignment = heap->space_alignment();
 132 
 133   // Include any space that is committed but is not in eden.


 149 //  some additional diagnostics
 150 // If no additional changes are required, this can be deleted
 151 // and the changes factored back into PSYoungGen::resize_generation().
 152 bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
 153   const size_t alignment = virtual_space()->alignment();
 154   size_t orig_size = virtual_space()->committed_size();
 155   bool size_changed = false;
 156 
 157   // There used to be a guarantee here that
 158   //   (eden_size + 2*survivor_size)  <= _max_gen_size
 159   // This requirement is enforced by the calculation of desired_size
 160 // below.  It may not be true on entry, since eden_size is not
 161 // bounded by the generation size.
 162 
 163   assert(max_size() == reserved().byte_size(), "max gen size problem?");
 164   assert(min_gen_size() <= orig_size && orig_size <= max_size(),
 165          "just checking");
 166 
 167   // Adjust new generation size
 168   const size_t eden_plus_survivors =
 169     align_up(eden_size + 2 * survivor_size, alignment);
 170   size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
 171                              min_gen_size());
 172   assert(desired_size <= gen_size_limit(), "just checking");
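
  // An illustrative check of the clamping above (values assumed, not
  // from this webrev): with eden_size == 20*M, survivor_size == 3*M,
  // alignment == 2*M, gen_size_limit() == 24*M and min_gen_size() == 8*M:
  //
  //   eden_plus_survivors = align_up(20*M + 2*3*M, 2*M);  // 26M
  //   desired_size = MAX2(MIN2(26*M, 24*M), 8*M);         // 24M
  //
  // i.e. an oversized request is capped at gen_size_limit(), which is
  // why the old guarantee is no longer needed.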
 173 
 174   if (desired_size > orig_size) {
 175     // Grow the generation
 176     size_t change = desired_size - orig_size;
 177     HeapWord* prev_low = (HeapWord*) virtual_space()->low();
 178     if (!virtual_space()->expand_by(change)) {
 179       return false;
 180     }
 181     if (ZapUnusedHeapArea) {
 182       // Mangle newly committed space immediately because it
 183       // can be done here more simply than after the new
 184       // spaces have been computed.
 185       HeapWord* new_low = (HeapWord*) virtual_space()->low();
 186       assert(new_low < prev_low, "Did not grow");
 187 
 188       MemRegion mangle_region(new_low, prev_low);
 189       SpaceMangler::mangle_region(mangle_region);


 315     // To-space may resize into from-space as long as it is clear of live data.
 316     // From space must remain page aligned, though, so we need to do some
 317     // extra calculations.
 318 
 319     // First calculate an optimal to-space
 320     to_end   = (char*)virtual_space()->high();
 321     to_start = (char*)pointer_delta(to_end,
 322                                     (char*)requested_survivor_size,
 323                                     sizeof(char));
 324 
 325     // Does the optimal to-space overlap from-space?
 326     if (to_start < (char*)from_space()->end()) {
 327       // Calculate the minimum offset possible for from_end
 328       size_t from_size =
 329         pointer_delta(from_space()->top(), from_start, sizeof(char));
 330 
 331       // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
 332       if (from_size == 0) {
 333         from_size = alignment;
 334       } else {
 335         from_size = align_up(from_size, alignment);
 336       }
 337 
 338       from_end = from_start + from_size;
 339       assert(from_end > from_start, "addition overflow or from_size problem");
 340 
 341       guarantee(from_end <= (char*)from_space()->end(),
 342         "from_end moved to the right");
 343 
 344       // Now update to_start with the new from_end
 345       to_start = MAX2(from_end, to_start);
 346     }
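
    // A worked example of the overlap handling above, with assumed
    // addresses (illustrative only): virtual_space()->high() == 0x4000000
    // and requested_survivor_size == 0x100000 give an optimal
    // to_start == 0x3F00000.  If from_space()->end() == 0x3F80000 the
    // spaces overlap, so with from_start == 0x3E00000,
    // from_space()->top() == 0x3E90000 and alignment == 0x100000:
    //
    //   from_size = align_up(0x90000, 0x100000);   // 0x100000
    //   from_end  = from_start + from_size;        // 0x3F00000
    //   to_start  = MAX2(from_end, to_start);      // 0x3F00000
    //
    // to-space then starts exactly where the shrunken from-space ends;
    // the guarantee above (from_end <= from_space()->end()) and the one
    // below (to_start != to_end) both hold.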
 347 
 348     guarantee(to_start != to_end, "to space is zero sized");
 349 
 350     log_trace(gc, ergo)("    [eden_start .. eden_end): "
 351                         "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 352                         p2i(eden_start),
 353                         p2i(eden_end),
 354                         pointer_delta(eden_end, eden_start, sizeof(char)));
 355     log_trace(gc, ergo)("    [from_start .. from_end): "


 402                         p2i(eden_start),
 403                         p2i(eden_end),
 404                         pointer_delta(eden_end, eden_start, sizeof(char)));
 405     log_trace(gc, ergo)("    [  to_start ..   to_end): "
 406                         "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 407                         p2i(to_start),
 408                         p2i(to_end),
 409                         pointer_delta(  to_end,   to_start, sizeof(char)));
 410     log_trace(gc, ergo)("    [from_start .. from_end): "
 411                         "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
 412                         p2i(from_start),
 413                         p2i(from_end),
 414                         pointer_delta(from_end, from_start, sizeof(char)));
 415   }
 416 
 417 
 418   guarantee((HeapWord*)from_start <= from_space()->bottom(),
 419             "from start moved to the right");
 420   guarantee((HeapWord*)from_end >= from_space()->top(),
 421             "from end moved into live data");
 422   assert(is_object_aligned(eden_start), "checking alignment");
 423   assert(is_object_aligned(from_start), "checking alignment");
 424   assert(is_object_aligned(to_start), "checking alignment");
 425 
 426   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
 427   MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
 428   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
 429 
 430   // Let's make sure the call to initialize doesn't reset "top"!
 431   DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)
 432 
 433   // For the logging block below
 434   size_t old_from = from_space()->capacity_in_bytes();
 435   size_t old_to   = to_space()->capacity_in_bytes();
 436 
 437   if (ZapUnusedHeapArea) {
 438     // NUMA is a special case: a NUMA space is not mangled, so that
 439     // its addresses are not prematurely bound to the wrong memory
 440     // (i.e., we don't want the GC thread to be the first to touch
 441     // the memory).  The survivor spaces are not NUMA spaces and
 442     // are mangled.
 443     if (UseNUMA) {
 444       if (eden_from_to_order) {

