< prev index next >

src/hotspot/share/gc/parallel/asPSOldGen.cpp

Print this page




  75   // The old gen can grow to gen_size_limit().  _reserve reflects only
  76   // the current maximum that can be committed.
  77   assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
  78 
  79   initialize_performance_counters(perf_data_name, level);
  80 }
  81 
     // Re-derive the reserved region from the virtual space's current
     // low/high boundaries after a boundary change (adaptive resizing of
     // the adjoining spaces), then run post_resize() to update dependent
     // state.  NOTE(review): assumes the virtual space boundaries have
     // already been moved by the caller — confirm against call sites.
  82 void ASPSOldGen::reset_after_change() {
  83   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  84                         (HeapWord*)virtual_space()->high_boundary());
  85   post_resize();
  86 }
  87 
  88 
     // Number of bytes this generation could still expand by: the gap
     // between the hard size limit and what is currently committed,
     // aligned down to the heap's generation alignment.
     // The asserts guarantee the subtraction below cannot underflow.
  89 size_t ASPSOldGen::available_for_expansion() {
  90   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  91   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
  92 
  93   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  94   size_t result =  gen_size_limit() - virtual_space()->committed_size();
     // Align down so the returned amount is a legal expansion size;
     // may therefore be 0 even when some unaligned slack exists.
  95   size_t result_aligned = align_down(result, heap->generation_alignment());
  96   return result_aligned;
  97 }
  98 
  99 size_t ASPSOldGen::available_for_contraction() {
 100   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
 101   if (uncommitted_bytes != 0) {
 102     return uncommitted_bytes;
 103   }
 104 
 105   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 106   const size_t gen_alignment = heap->generation_alignment();
 107   PSAdaptiveSizePolicy* policy = heap->size_policy();
 108   const size_t working_size =
 109     used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
 110   const size_t working_aligned = align_up(working_size, gen_alignment);
 111   const size_t working_or_min = MAX2(working_aligned, min_gen_size());
 112   if (working_or_min > reserved().byte_size()) {
 113     // If the used or minimum gen size (aligned up) is greater
 114     // than the total reserved size, then the space available
 115     // for contraction should (after proper alignment) be 0
 116     return 0;
 117   }
 118   const size_t max_contraction =
 119     reserved().byte_size() - working_or_min;
 120 
 121   // Use the "increment" fraction instead of the "decrement" fraction
 122   // to allow the other gen to expand more aggressively.  The
 123   // "decrement" fraction is conservative because its intent is to
 124   // only reduce the footprint.
 125 
 126   size_t result = policy->promo_increment_aligned_down(max_contraction);


  75   // The old gen can grow to gen_size_limit().  _reserve reflects only
  76   // the current maximum that can be committed.
  77   assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
  78 
  79   initialize_performance_counters(perf_data_name, level);
  80 }
  81 
     // Re-derive the reserved region from the virtual space's current
     // low/high boundaries after a boundary change (adaptive resizing of
     // the adjoining spaces), then run post_resize() to update dependent
     // state.  NOTE(review): assumes the virtual space boundaries have
     // already been moved by the caller — confirm against call sites.
  82 void ASPSOldGen::reset_after_change() {
  83   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  84                         (HeapWord*)virtual_space()->high_boundary());
  85   post_resize();
  86 }
  87 
  88 
     // Number of bytes this generation could still expand by: the gap
     // between the hard size limit and what is currently committed,
     // aligned down to the heap's generation alignment.
     // The asserts guarantee the subtraction below cannot underflow.
  89 size_t ASPSOldGen::available_for_expansion() {
  90   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  91   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
  92 
  93   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  94   size_t result =  gen_size_limit() - virtual_space()->committed_size();
     // Align down so the returned amount is a legal expansion size;
     // may therefore be 0 even when some unaligned slack exists.
  95   size_t result_aligned = align_down(result, heap->gen_alignment());
  96   return result_aligned;
  97 }
  98 
  99 size_t ASPSOldGen::available_for_contraction() {
 100   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
 101   if (uncommitted_bytes != 0) {
 102     return uncommitted_bytes;
 103   }
 104 
 105   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 106   const size_t gen_alignment = heap->gen_alignment();
 107   PSAdaptiveSizePolicy* policy = heap->size_policy();
 108   const size_t working_size =
 109     used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
 110   const size_t working_aligned = align_up(working_size, gen_alignment);
 111   const size_t working_or_min = MAX2(working_aligned, min_gen_size());
 112   if (working_or_min > reserved().byte_size()) {
 113     // If the used or minimum gen size (aligned up) is greater
 114     // than the total reserved size, then the space available
 115     // for contraction should (after proper alignment) be 0
 116     return 0;
 117   }
 118   const size_t max_contraction =
 119     reserved().byte_size() - working_or_min;
 120 
 121   // Use the "increment" fraction instead of the "decrement" fraction
 122   // to allow the other gen to expand more aggressively.  The
 123   // "decrement" fraction is conservative because its intent is to
 124   // only reduce the footprint.
 125 
 126   size_t result = policy->promo_increment_aligned_down(max_contraction);
< prev index next >