< prev index next >

src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp

Print this page
rev 8068 : imported patch parallelscavenge_cleanup


  56 
     // Set up the generation's backing store: wrap the reserved region in a
     // high-to-low PSVirtualSpace and commit the initial generation size.
     // VM startup is aborted if the initial commit cannot be satisfied.
  57 void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
  58                                             size_t alignment) {
  59   assert(_init_gen_size != 0, "Should have a finite size");
     // NOTE(review): HighToLow suggests commits grow from the high end of
     // rs toward the low end — confirm against PSVirtualSpaceHighToLow.
  60   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  61   if (!_virtual_space->expand_by(_init_gen_size)) {
  62     vm_exit_during_initialization("Could not reserve enough space for "
  63                                   "object heap");
  64   }
  65 }
  66 
     // Two-phase initialization: commit the initial virtual space, then do
     // the remaining generation setup in initialize_work() (defined elsewhere).
  67 void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  68   initialize_virtual_space(rs, alignment);
  69   initialize_work();
  70 }
  71 
     // Returns the number of bytes this generation could still commit: the
     // gap between the current committed size and gen_size_limit(), aligned
     // down to the heap's generation alignment.
  72 size_t ASPSYoungGen::available_for_expansion() {
  73   size_t current_committed_size = virtual_space()->committed_size();
  74   assert((gen_size_limit() >= current_committed_size),
  75     "generation size limit is wrong");
  76   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  77   size_t result =  gen_size_limit() - current_committed_size;
  78   size_t result_aligned = align_size_down(result, heap->generation_alignment());
  79   return result_aligned;
  80 }
  81 
  82 // Return the number of bytes the young gen is willing to give up.
  83 //
  84 // Future implementations could check the survivors and if to_space is in the
  85 // right place (below from_space), take a chunk from to_space.
     // Any still-uncommitted space is returned immediately.  Otherwise a
     // non-zero result is possible only when eden is empty: the contraction
     // is bounded by what eden can yield (keeping one eden_alignment) and
     // what the generation can yield (keeping min_gen_size()), then scaled
     // down via the size policy's eden increment and aligned to
     // gen_alignment.
  86 size_t ASPSYoungGen::available_for_contraction() {
  87   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  88   if (uncommitted_bytes != 0) {
  89     return uncommitted_bytes;
  90   }
  91 
  92   if (eden_space()->is_empty()) {
  93     // Respect the minimum size for eden and for the young gen as a whole.
  94     ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  95     const size_t eden_alignment = heap->space_alignment();
  96     const size_t gen_alignment = heap->generation_alignment();
  97 
  98     assert(eden_space()->capacity_in_bytes() >= eden_alignment,
  99       "Alignment is wrong");
 100     size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
 101     eden_avail = align_size_down(eden_avail, gen_alignment);
 102 
 103     assert(virtual_space()->committed_size() >= min_gen_size(),
 104       "minimum gen size is wrong");
 105     size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
 106     assert(virtual_space()->is_aligned(gen_avail), "not aligned");
 107 
 108     const size_t max_contraction = MIN2(eden_avail, gen_avail);
 109     // See comment for ASPSOldGen::available_for_contraction()
 110     // for reasons the "increment" fraction is used.
 111     PSAdaptiveSizePolicy* policy = heap->size_policy();
 112     size_t result = policy->eden_increment_aligned_down(max_contraction);
 113     size_t result_aligned = align_size_down(result, gen_alignment);
 114     if (PrintAdaptiveSizePolicy && Verbose) {
 115       gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K",
 116         result_aligned/K);
 117       gclog_or_tty->print_cr("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
 118       gclog_or_tty->print_cr("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
 119       gclog_or_tty->print_cr("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
 120     }
 121     return result_aligned;
 122   }
 123 
 124   return 0;
 125 }
 126 
 127 // The current implementation only considers to the end of eden.
 128 // If to_space is below from_space, to_space is not considered,
 129 // although in the future to_space could be considered as well.
     // Returns bytes of committed space not needed for live data: everything
     // committed below eden's bottom, plus (when eden is empty) all of eden's
     // capacity except one space_alignment's worth.
 130 size_t ASPSYoungGen::available_to_live() {
 131   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 132   const size_t alignment = heap->space_alignment();
 133 
 134   // Include any space that is committed but is not in eden.
 135   size_t available = pointer_delta(eden_space()->bottom(),
 136                                    virtual_space()->low(),
 137                                    sizeof(char));
 138 
 139   const size_t eden_capacity = eden_space()->capacity_in_bytes();
 140   if (eden_space()->is_empty() && eden_capacity > alignment) {
 141     available += eden_capacity - alignment;
 142   }
 143   return available;
 144 }
 145 
 146 // Similar to PSYoungGen::resize_generation() but
 147 //  allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
 148 //  expands at the low end of the virtual space
 149 //  moves the boundary between the generations in order to expand
 150 //  some additional diagnostics
 151 // If no additional changes are required, this can be deleted


 275 
 276   // There's nothing to do if the new sizes are the same as the current
 277   if (requested_survivor_size == to_space()->capacity_in_bytes() &&
 278       requested_survivor_size == from_space()->capacity_in_bytes() &&
 279       requested_eden_size == eden_space()->capacity_in_bytes()) {
 280     if (PrintAdaptiveSizePolicy && Verbose) {
 281       gclog_or_tty->print_cr("    capacities are the right sizes, returning");
 282     }
 283     return;
 284   }
 285 
 286   char* eden_start = (char*)virtual_space()->low();
 287   char* eden_end   = (char*)eden_space()->end();
 288   char* from_start = (char*)from_space()->bottom();
 289   char* from_end   = (char*)from_space()->end();
 290   char* to_start   = (char*)to_space()->bottom();
 291   char* to_end     = (char*)to_space()->end();
 292 
 293   assert(eden_start < from_start, "Cannot push into from_space");
 294 
 295   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 296   const size_t alignment = heap->space_alignment();
 297   const bool maintain_minimum =
 298     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
 299 
 300   bool eden_from_to_order = from_start < to_start;
 301   // Check whether from space is below to space
 302   if (eden_from_to_order) {
 303     // Eden, from, to
 304 
 305     if (PrintAdaptiveSizePolicy && Verbose) {
 306       gclog_or_tty->print_cr("  Eden, from, to:");
 307     }
 308 
 309     // Set eden
 310     // "requested_eden_size" is a goal for the size of eden
 311     // and may not be attainable.  "eden_size" below is
 312     // calculated based on the location of from-space and
 313     // the goal for the size of eden.  from-space is
 314     // fixed in place because it contains live data.
 315     // The calculation is done this way to avoid 32bit


 328     } else {
 329       eden_size = MIN2(requested_eden_size,
 330                        pointer_delta(from_start, eden_start, sizeof(char)));
 331     }
 332 
 333     eden_end = eden_start + eden_size;
 334     assert(eden_end >= eden_start, "addition overflowed");
 335 
 336     // To may resize into from space as long as it is clear of live data.
 337     // From space must remain page aligned, though, so we need to do some
 338     // extra calculations.
 339 
 340     // First calculate an optimal to-space
 341     to_end   = (char*)virtual_space()->high();
 342     to_start = (char*)pointer_delta(to_end,
 343                                     (char*)requested_survivor_size,
 344                                     sizeof(char));
 345 
 346     // Does the optimal to-space overlap from-space?
 347     if (to_start < (char*)from_space()->end()) {
 348       assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 349 
 350       // Calculate the minimum offset possible for from_end
 351       size_t from_size =
 352         pointer_delta(from_space()->top(), from_start, sizeof(char));
 353 
 354       // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
 355       if (from_size == 0) {
 356         from_size = alignment;
 357       } else {
 358         from_size = align_size_up(from_size, alignment);
 359       }
 360 
 361       from_end = from_start + from_size;
 362       assert(from_end > from_start, "addition overflow or from_size problem");
 363 
 364       guarantee(from_end <= (char*)from_space()->end(),
 365         "from_end moved to the right");
 366 
 367       // Now update to_start with the new from_end
 368       to_start = MAX2(from_end, to_start);
 369     }


 492     from_space()->check_mangled_unused_area(limit);
 493       to_space()->check_mangled_unused_area(limit);
 494   }
 495   // When an existing space is being initialized, it is not
 496   // mangled because the space has been previously mangled.
 497   eden_space()->initialize(edenMR,
 498                            SpaceDecorator::Clear,
 499                            SpaceDecorator::DontMangle);
 500     to_space()->initialize(toMR,
 501                            SpaceDecorator::Clear,
 502                            SpaceDecorator::DontMangle);
 503   from_space()->initialize(fromMR,
 504                            SpaceDecorator::DontClear,
 505                            SpaceDecorator::DontMangle);
 506 
 507   PSScavenge::set_young_generation_boundary(eden_space()->bottom());
 508 
 509   assert(from_space()->top() == old_from_top, "from top changed!");
 510 
 511   if (PrintAdaptiveSizePolicy) {
 512     ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
 513     assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 514 
 515     gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
 516                   "collection: %d "
 517                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
 518                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
 519                   heap->total_collections(),
 520                   old_from, old_to,
 521                   from_space()->capacity_in_bytes(),
 522                   to_space()->capacity_in_bytes());
 523     gclog_or_tty->cr();
 524   }
 525   space_invariants();
 526 }
     // Refresh cached state after the generation's boundary/size has changed:
     // recompute _reserved from the virtual space bounds, update the reference
     // processor's span, re-initialize (clear + mangle) eden and republish the
     // young-gen boundary if eden's bottom moved, and resize the barrier set's
     // covered region to the new committed range.
 527 void ASPSYoungGen::reset_after_change() {
 528   assert_locked_or_safepoint(Heap_lock);
 529 
 530   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
 531                         (HeapWord*)virtual_space()->high_boundary());
 532   PSScavenge::reference_processor()->set_span(_reserved);
 533 
 534   HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
 535   HeapWord* eden_bottom = eden_space()->bottom();
 536   if (new_eden_bottom != eden_bottom) {
 537     MemRegion eden_mr(new_eden_bottom, eden_space()->end());
 538     eden_space()->initialize(eden_mr,
 539                              SpaceDecorator::Clear,
 540                              SpaceDecorator::Mangle);
 541     PSScavenge::set_young_generation_boundary(eden_space()->bottom());
 542   }
 543   MemRegion cmr((HeapWord*)virtual_space()->low(),
 544                 (HeapWord*)virtual_space()->high());
 545   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 546 
 547   space_invariants();
 548 }


  56 
     // Set up the generation's backing store: wrap the reserved region in a
     // high-to-low PSVirtualSpace and commit the initial generation size.
     // VM startup is aborted if the initial commit cannot be satisfied.
  57 void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
  58                                             size_t alignment) {
  59   assert(_init_gen_size != 0, "Should have a finite size");
     // NOTE(review): HighToLow suggests commits grow from the high end of
     // rs toward the low end — confirm against PSVirtualSpaceHighToLow.
  60   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  61   if (!_virtual_space->expand_by(_init_gen_size)) {
  62     vm_exit_during_initialization("Could not reserve enough space for "
  63                                   "object heap");
  64   }
  65 }
  66 
     // Two-phase initialization: commit the initial virtual space, then do
     // the remaining generation setup in initialize_work() (defined elsewhere).
  67 void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  68   initialize_virtual_space(rs, alignment);
  69   initialize_work();
  70 }
  71 
     // Returns the number of bytes this generation could still commit: the
     // gap between the current committed size and gen_size_limit(), aligned
     // down to the heap's generation alignment.
  72 size_t ASPSYoungGen::available_for_expansion() {
  73   size_t current_committed_size = virtual_space()->committed_size();
  74   assert((gen_size_limit() >= current_committed_size),
  75     "generation size limit is wrong");
  76   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  77   size_t result =  gen_size_limit() - current_committed_size;
  78   size_t result_aligned = align_size_down(result, heap->generation_alignment());
  79   return result_aligned;
  80 }
  81 
  82 // Return the number of bytes the young gen is willing to give up.
  83 //
  84 // Future implementations could check the survivors and if to_space is in the
  85 // right place (below from_space), take a chunk from to_space.
     // Any still-uncommitted space is returned immediately.  Otherwise a
     // non-zero result is possible only when eden is empty: the contraction
     // is bounded by what eden can yield (keeping one eden_alignment) and
     // what the generation can yield (keeping min_gen_size()), then scaled
     // down via the size policy's eden increment and aligned to
     // gen_alignment.
  86 size_t ASPSYoungGen::available_for_contraction() {
  87   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  88   if (uncommitted_bytes != 0) {
  89     return uncommitted_bytes;
  90   }
  91 
  92   if (eden_space()->is_empty()) {
  93     // Respect the minimum size for eden and for the young gen as a whole.
  94     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  95     const size_t eden_alignment = heap->space_alignment();
  96     const size_t gen_alignment = heap->generation_alignment();
  97 
  98     assert(eden_space()->capacity_in_bytes() >= eden_alignment,
  99       "Alignment is wrong");
 100     size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
 101     eden_avail = align_size_down(eden_avail, gen_alignment);
 102 
 103     assert(virtual_space()->committed_size() >= min_gen_size(),
 104       "minimum gen size is wrong");
 105     size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
 106     assert(virtual_space()->is_aligned(gen_avail), "not aligned");
 107 
 108     const size_t max_contraction = MIN2(eden_avail, gen_avail);
 109     // See comment for ASPSOldGen::available_for_contraction()
 110     // for reasons the "increment" fraction is used.
 111     PSAdaptiveSizePolicy* policy = heap->size_policy();
 112     size_t result = policy->eden_increment_aligned_down(max_contraction);
 113     size_t result_aligned = align_size_down(result, gen_alignment);
 114     if (PrintAdaptiveSizePolicy && Verbose) {
 115       gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K",
 116         result_aligned/K);
 117       gclog_or_tty->print_cr("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
 118       gclog_or_tty->print_cr("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
 119       gclog_or_tty->print_cr("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
 120     }
 121     return result_aligned;
 122   }
 123 
 124   return 0;
 125 }
 126 
 127 // The current implementation only considers to the end of eden.
 128 // If to_space is below from_space, to_space is not considered,
 129 // although in the future to_space could be considered as well.
     // Returns bytes of committed space not needed for live data: everything
     // committed below eden's bottom, plus (when eden is empty) all of eden's
     // capacity except one space_alignment's worth.
 130 size_t ASPSYoungGen::available_to_live() {
 131   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 132   const size_t alignment = heap->space_alignment();
 133 
 134   // Include any space that is committed but is not in eden.
 135   size_t available = pointer_delta(eden_space()->bottom(),
 136                                    virtual_space()->low(),
 137                                    sizeof(char));
 138 
 139   const size_t eden_capacity = eden_space()->capacity_in_bytes();
 140   if (eden_space()->is_empty() && eden_capacity > alignment) {
 141     available += eden_capacity - alignment;
 142   }
 143   return available;
 144 }
 145 
 146 // Similar to PSYoungGen::resize_generation() but
 147 //  allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
 148 //  expands at the low end of the virtual space
 149 //  moves the boundary between the generations in order to expand
 150 //  some additional diagnostics
 151 // If no additional changes are required, this can be deleted


 275 
 276   // There's nothing to do if the new sizes are the same as the current
 277   if (requested_survivor_size == to_space()->capacity_in_bytes() &&
 278       requested_survivor_size == from_space()->capacity_in_bytes() &&
 279       requested_eden_size == eden_space()->capacity_in_bytes()) {
 280     if (PrintAdaptiveSizePolicy && Verbose) {
 281       gclog_or_tty->print_cr("    capacities are the right sizes, returning");
 282     }
 283     return;
 284   }
 285 
 286   char* eden_start = (char*)virtual_space()->low();
 287   char* eden_end   = (char*)eden_space()->end();
 288   char* from_start = (char*)from_space()->bottom();
 289   char* from_end   = (char*)from_space()->end();
 290   char* to_start   = (char*)to_space()->bottom();
 291   char* to_end     = (char*)to_space()->end();
 292 
 293   assert(eden_start < from_start, "Cannot push into from_space");
 294 
 295   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 296   const size_t alignment = heap->space_alignment();
 297   const bool maintain_minimum =
 298     (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
 299 
 300   bool eden_from_to_order = from_start < to_start;
 301   // Check whether from space is below to space
 302   if (eden_from_to_order) {
 303     // Eden, from, to
 304 
 305     if (PrintAdaptiveSizePolicy && Verbose) {
 306       gclog_or_tty->print_cr("  Eden, from, to:");
 307     }
 308 
 309     // Set eden
 310     // "requested_eden_size" is a goal for the size of eden
 311     // and may not be attainable.  "eden_size" below is
 312     // calculated based on the location of from-space and
 313     // the goal for the size of eden.  from-space is
 314     // fixed in place because it contains live data.
 315     // The calculation is done this way to avoid 32bit


 328     } else {
 329       eden_size = MIN2(requested_eden_size,
 330                        pointer_delta(from_start, eden_start, sizeof(char)));
 331     }
 332 
 333     eden_end = eden_start + eden_size;
 334     assert(eden_end >= eden_start, "addition overflowed");
 335 
 336     // To may resize into from space as long as it is clear of live data.
 337     // From space must remain page aligned, though, so we need to do some
 338     // extra calculations.
 339 
 340     // First calculate an optimal to-space
 341     to_end   = (char*)virtual_space()->high();
 342     to_start = (char*)pointer_delta(to_end,
 343                                     (char*)requested_survivor_size,
 344                                     sizeof(char));
 345 
 346     // Does the optimal to-space overlap from-space?
 347     if (to_start < (char*)from_space()->end()) {


 348       // Calculate the minimum offset possible for from_end
 349       size_t from_size =
 350         pointer_delta(from_space()->top(), from_start, sizeof(char));
 351 
 352       // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
 353       if (from_size == 0) {
 354         from_size = alignment;
 355       } else {
 356         from_size = align_size_up(from_size, alignment);
 357       }
 358 
 359       from_end = from_start + from_size;
 360       assert(from_end > from_start, "addition overflow or from_size problem");
 361 
 362       guarantee(from_end <= (char*)from_space()->end(),
 363         "from_end moved to the right");
 364 
 365       // Now update to_start with the new from_end
 366       to_start = MAX2(from_end, to_start);
 367     }


 490     from_space()->check_mangled_unused_area(limit);
 491       to_space()->check_mangled_unused_area(limit);
 492   }
 493   // When an existing space is being initialized, it is not
 494   // mangled because the space has been previously mangled.
 495   eden_space()->initialize(edenMR,
 496                            SpaceDecorator::Clear,
 497                            SpaceDecorator::DontMangle);
 498     to_space()->initialize(toMR,
 499                            SpaceDecorator::Clear,
 500                            SpaceDecorator::DontMangle);
 501   from_space()->initialize(fromMR,
 502                            SpaceDecorator::DontClear,
 503                            SpaceDecorator::DontMangle);
 504 
 505   PSScavenge::set_young_generation_boundary(eden_space()->bottom());
 506 
 507   assert(from_space()->top() == old_from_top, "from top changed!");
 508 
 509   if (PrintAdaptiveSizePolicy) {
 510     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();


 511     gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
 512                   "collection: %d "
 513                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
 514                   "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
 515                   heap->total_collections(),
 516                   old_from, old_to,
 517                   from_space()->capacity_in_bytes(),
 518                   to_space()->capacity_in_bytes());
 519     gclog_or_tty->cr();
 520   }
 521   space_invariants();
 522 }
     // Refresh cached state after the generation's boundary/size has changed:
     // recompute _reserved from the virtual space bounds, update the reference
     // processor's span, re-initialize (clear + mangle) eden and republish the
     // young-gen boundary if eden's bottom moved, and resize the barrier set's
     // covered region to the new committed range.
 523 void ASPSYoungGen::reset_after_change() {
 524   assert_locked_or_safepoint(Heap_lock);
 525 
 526   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
 527                         (HeapWord*)virtual_space()->high_boundary());
 528   PSScavenge::reference_processor()->set_span(_reserved);
 529 
 530   HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
 531   HeapWord* eden_bottom = eden_space()->bottom();
 532   if (new_eden_bottom != eden_bottom) {
 533     MemRegion eden_mr(new_eden_bottom, eden_space()->end());
 534     eden_space()->initialize(eden_mr,
 535                              SpaceDecorator::Clear,
 536                              SpaceDecorator::Mangle);
 537     PSScavenge::set_young_generation_boundary(eden_space()->bottom());
 538   }
 539   MemRegion cmr((HeapWord*)virtual_space()->low(),
 540                 (HeapWord*)virtual_space()->high());
 541   ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
 542 
 543   space_invariants();
 544 }
< prev index next >