
src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp

rev 8068 : imported patch parallelscavenge_cleanup


  72   PSOldGen::initialize_work(perf_data_name, level);
  73 
  74   // The old gen can grow to gen_size_limit().  _reserved reflects only
  75   // the current maximum that can be committed.
  76   assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
  77 
  78   initialize_performance_counters(perf_data_name, level);
  79 }
  80 
  81 void ASPSOldGen::reset_after_change() {
  82   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  83                         (HeapWord*)virtual_space()->high_boundary());
  84   post_resize();
  85 }
  86 
  87 
  88 size_t ASPSOldGen::available_for_expansion() {
  89   assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
  90   assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");
  91 
-  92   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  92   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  93   size_t result =  gen_size_limit() - virtual_space()->committed_size();
  94   size_t result_aligned = align_size_down(result, heap->generation_alignment());
  95   return result_aligned;
  96 }
  97 
  98 size_t ASPSOldGen::available_for_contraction() {
  99   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  100   if (uncommitted_bytes != 0) {
  101     return uncommitted_bytes;
  102   }
  103 
- 104   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+ 104   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  105   const size_t gen_alignment = heap->generation_alignment();
  106   PSAdaptiveSizePolicy* policy = heap->size_policy();
  107   const size_t working_size =
  108     used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
  109   const size_t working_aligned = align_size_up(working_size, gen_alignment);
  110   const size_t working_or_min = MAX2(working_aligned, min_gen_size());
  111   if (working_or_min > reserved().byte_size()) {
  112     // If the used or minimum gen size (aligned up) is greater
  113     // than the total reserved size, then the space available
  114     // for contraction should (after proper alignment) be 0
  115     return 0;
  116   }
  117   const size_t max_contraction =
  118     reserved().byte_size() - working_or_min;
  119 
  120   // Use the "increment" fraction instead of the "decrement" fraction
  121   // to allow the other gen to expand more aggressively.  The
  122   // "decrement" fraction is conservative because its intent is to
  123   // only reduce the footprint.
  124 
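
The only functional change in this hunk is at lines 92 and 104: the raw C-style cast of Universe::heap() is replaced by the ParallelScavengeHeap::heap() accessor. The sketch below shows the usual shape of such a checked accessor; it illustrates the pattern, and is not quoted from the patch itself.

    // A minimal sketch of a checked heap accessor (assumed shape, not quoted
    // from the patch): the cast and its sanity checks live in one place
    // instead of at every call site.
    ParallelScavengeHeap* ParallelScavengeHeap::heap() {
      CollectedHeap* heap = Universe::heap();
      assert(heap != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Not a ParallelScavengeHeap");
      return (ParallelScavengeHeap*)heap;
    }

Centralizing the check means a wrong heap kind fails an assert in debug builds rather than silently misbehaving through a bad cast.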

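available_for_expansion() (lines 88-96) computes the remaining headroom: gen_size_limit() minus the committed size, rounded down to the generation alignment so any growth stays aligned. Below is a self-contained trace of that arithmetic, with hypothetical sizes and a stand-in for HotSpot's align_size_down():

    #include <cstddef>
    #include <cstdio>

    // Stand-in for HotSpot's align_size_down(); assumes a power-of-two
    // alignment, which generation_alignment() guarantees.
    static size_t align_size_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);
    }

    int main() {
      // Hypothetical sizes, chosen only to trace lines 92-95 above.
      const size_t gen_size_limit = 512 * 1024 * 1024;        // hard growth limit
      const size_t committed      = 200 * 1024 * 1024 + 4096; // currently committed
      const size_t gen_alignment  = 64 * 1024;

      size_t result         = gen_size_limit - committed;
      size_t result_aligned = align_size_down(result, gen_alignment);
      printf("available for expansion: %zu bytes\n", result_aligned);
      return 0;
    }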

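available_for_contraction() (from line 98) works from the other end. If part of the reservation is still uncommitted it simply returns that; otherwise it keeps enough committed space for the working set, used_in_bytes() plus the padded average of recent promotions, aligned up and clamped below by min_gen_size(), and the remainder of the reservation is the most that could be given back. The hunk is cut off before the final scaling by the "increment" fraction, so the sketch below, again with hypothetical numbers, only traces the part shown:

    #include <cstddef>
    #include <cstdio>

    // Stand-ins for HotSpot's align_size_up() and MAX2(); power-of-two alignment.
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }
    static size_t MAX2(size_t a, size_t b) { return a > b ? a : b; }

    int main() {
      // Hypothetical numbers, chosen only to trace lines 107-118 above.
      const size_t gen_alignment      = 64 * 1024;              // 64K
      const size_t reserved           = 256 * 1024 * 1024;      // 256M reserved
      const size_t min_gen_size       = 32 * 1024 * 1024;       // 32M floor
      const size_t used               = 100 * 1024 * 1024;      // live data
      const size_t padded_avg_promote = 10 * 1024 * 1024 + 123; // unaligned

      const size_t working_size    = used + padded_avg_promote;
      const size_t working_aligned = align_size_up(working_size, gen_alignment);
      const size_t working_or_min  = MAX2(working_aligned, min_gen_size);
      if (working_or_min > reserved) {
        printf("no room to contract\n");
        return 0;
      }
      const size_t max_contraction = reserved - working_or_min;
      printf("upper bound on contraction: %zu bytes\n", max_contraction);
      return 0;
    }

Per the comment at lines 120-123, the code that follows (outside this hunk) reduces this bound by the policy's "increment" fraction rather than handing back the whole amount, so the other generation keeps room to expand.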