< prev index next >

src/share/vm/gc/parallel/asPSOldGen.cpp

Print this page




  74   // The old gen can grow to gen_size_limit().  _reserved reflects only
  75   // the current maximum that can be committed.
  76   assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
  77 
  78   initialize_performance_counters(perf_data_name, level);
  79 }
  80 
  81 void ASPSOldGen::reset_after_change() {
       // Recompute _reserved to span the virtual space's current
       // [low_boundary, high_boundary) — the boundaries may have moved —
       // then let post_resize() update dependent state.
  82   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  83                         (HeapWord*)virtual_space()->high_boundary());
  84   post_resize();
  85 }
  86 
  87 
  88 size_t ASPSOldGen::available_for_expansion() {
  89   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  90   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
  91 
       // Room to expand is whatever part of the gen size limit is not yet
       // committed, rounded down to the inter-generation alignment.
       // Use align_down() — the current HotSpot alignment API (replaces
       // the old align_size_down()).
  92   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  93   size_t result =  gen_size_limit() - virtual_space()->committed_size();
  94   size_t result_aligned = align_down(result, heap->generation_alignment());
  95   return result_aligned;
  96 }
  97 
  98 size_t ASPSOldGen::available_for_contraction() {
       // Any still-uncommitted part of the reserved space is trivially
       // available for contraction.
  99   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
 100   if (uncommitted_bytes != 0) {
 101     return uncommitted_bytes;
 102   }
 103 
 104   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 105   const size_t gen_alignment = heap->generation_alignment();
 106   PSAdaptiveSizePolicy* policy = heap->size_policy();
       // Working size: current usage plus the padded average of recent
       // promotions — the space the old gen is expected to need soon.
       // align_up()/align_down() are the current HotSpot alignment API
       // (replacing align_size_up()/align_size_down()).
 107   const size_t working_size =
 108     used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
 109   const size_t working_aligned = align_up(working_size, gen_alignment);
 110   const size_t working_or_min = MAX2(working_aligned, min_gen_size());
 111   if (working_or_min > reserved().byte_size()) {
 112     // If the used or minimum gen size (aligned up) is greater
 113     // than the total reserved size, then the space available
 114     // for contraction should (after proper alignment) be 0
 115     return 0;
 116   }
 117   const size_t max_contraction =
 118     reserved().byte_size() - working_or_min;
 119 
 120   // Use the "increment" fraction instead of the "decrement" fraction
 121   // to allow the other gen to expand more aggressively.  The
 122   // "decrement" fraction is conservative because its intent is to
 123   // only reduce the footprint.
 124 
 125   size_t result = policy->promo_increment_aligned_down(max_contraction);
 126   // Also adjust for inter-generational alignment
 127   size_t result_aligned = align_down(result, gen_alignment);
 128 
 129   Log(gc, ergo) log;
 130   if (log.is_trace()) {
 131     size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
 132     size_t promo_increment = policy->promo_increment(max_contraction);
 133     log.trace("ASPSOldGen::available_for_contraction: " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, result_aligned/K, result_aligned);
 134     log.trace(" reserved().byte_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, reserved().byte_size()/K, reserved().byte_size());
 135     log.trace(" padded promoted " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, working_promoted/K, working_promoted);
 136     log.trace(" used " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, used_in_bytes()/K, used_in_bytes());
 137     log.trace(" min_gen_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, min_gen_size()/K, min_gen_size());
 138     log.trace(" max_contraction " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, max_contraction/K, max_contraction);
 139     log.trace("    without alignment " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, promo_increment/K, promo_increment);
 140     log.trace(" alignment " SIZE_FORMAT_HEX, gen_alignment);
 141   }
 142 
 143   assert(result_aligned <= max_contraction, "arithmetic is wrong");
 144   return result_aligned;
 145 }


  74   // The old gen can grow to gen_size_limit().  _reserved reflects only
  75   // the current maximum that can be committed.
  76   assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
  77 
  78   initialize_performance_counters(perf_data_name, level);
  79 }
  80 
  81 void ASPSOldGen::reset_after_change() {
       // Recompute _reserved to span the virtual space's current
       // [low_boundary, high_boundary) — the boundaries may have moved —
       // then let post_resize() update dependent state.
  82   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  83                         (HeapWord*)virtual_space()->high_boundary());
  84   post_resize();
  85 }
  86 
  87 
  88 size_t ASPSOldGen::available_for_expansion() {
  89   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  90   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
  91 
       // Room to expand is whatever part of the gen size limit is not yet
       // committed, rounded down to the inter-generation alignment.
  92   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  93   size_t result =  gen_size_limit() - virtual_space()->committed_size();
  94   size_t result_aligned = align_down(result, heap->generation_alignment());
  95   return result_aligned;
  96 }
  97 
  98 size_t ASPSOldGen::available_for_contraction() {
       // Any still-uncommitted part of the reserved space is trivially
       // available for contraction.
  99   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
 100   if (uncommitted_bytes != 0) {
 101     return uncommitted_bytes;
 102   }
 103 
 104   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 105   const size_t gen_alignment = heap->generation_alignment();
 106   PSAdaptiveSizePolicy* policy = heap->size_policy();
       // Working size: current usage plus the padded average of recent
       // promotions — the space the old gen is expected to need soon.
 107   const size_t working_size =
 108     used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
 109   const size_t working_aligned = align_up(working_size, gen_alignment);
 110   const size_t working_or_min = MAX2(working_aligned, min_gen_size());
 111   if (working_or_min > reserved().byte_size()) {
 112     // If the used or minimum gen size (aligned up) is greater
 113     // than the total reserved size, then the space available
 114     // for contraction should (after proper alignment) be 0
 115     return 0;
 116   }
 117   const size_t max_contraction =
 118     reserved().byte_size() - working_or_min;
 119 
 120   // Use the "increment" fraction instead of the "decrement" fraction
 121   // to allow the other gen to expand more aggressively.  The
 122   // "decrement" fraction is conservative because its intent is to
 123   // only reduce the footprint.
 124 
 125   size_t result = policy->promo_increment_aligned_down(max_contraction);
 126   // Also adjust for inter-generational alignment
 127   size_t result_aligned = align_down(result, gen_alignment);
 128 
 129   Log(gc, ergo) log;
 130   if (log.is_trace()) {
 131     size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
 132     size_t promo_increment = policy->promo_increment(max_contraction);
 133     log.trace("ASPSOldGen::available_for_contraction: " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, result_aligned/K, result_aligned);
 134     log.trace(" reserved().byte_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, reserved().byte_size()/K, reserved().byte_size());
 135     log.trace(" padded promoted " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, working_promoted/K, working_promoted);
 136     log.trace(" used " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, used_in_bytes()/K, used_in_bytes());
 137     log.trace(" min_gen_size() " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, min_gen_size()/K, min_gen_size());
 138     log.trace(" max_contraction " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, max_contraction/K, max_contraction);
 139     log.trace("    without alignment " SIZE_FORMAT " K / " SIZE_FORMAT_HEX, promo_increment/K, promo_increment);
 140     log.trace(" alignment " SIZE_FORMAT_HEX, gen_alignment);
 141   }
 142 
 143   assert(result_aligned <= max_contraction, "arithmetic is wrong");
 144   return result_aligned;
 145 }
< prev index next >