// src/share/vm/memory/collectorPolicy.cpp
//
// (captured page header: "Print this page")




 128   case GenRemSet::CardTable: {
 129     CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
 130     return res;
 131   }
 132   default:
 133     guarantee(false, "unrecognized GenRemSet::Name");
 134     return NULL;
 135   }
 136 }
 137 
 138 void CollectorPolicy::cleared_all_soft_refs() {
 139   // If near gc overhear limit, continue to clear SoftRefs.  SoftRefs may
 140   // have been cleared in the last collection but if the gc overhear
 141   // limit continues to be near, SoftRefs should still be cleared.
 142   if (size_policy() != NULL) {
 143     _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
 144   }
 145   _all_soft_refs_clear = true;
 146 }
 147 
























 148 
 149 // GenCollectorPolicy methods.
 150 
 151 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
 152   size_t x = base_size / (NewRatio+1);
 153   size_t new_gen_size = x > min_alignment() ?
 154                      align_size_down(x, min_alignment()) :
 155                      min_alignment();
 156   return new_gen_size;
 157 }
 158 
 159 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
 160                                                  size_t maximum_size) {
 161   size_t alignment = min_alignment();
 162   size_t max_minus = maximum_size - alignment;
 163   return desired_size < max_minus ? desired_size : max_minus;
 164 }
 165 
 166 
 167 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
 168                                                 size_t init_promo_size,
 169                                                 size_t init_survivor_size) {
 170   const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
 171   _size_policy = new AdaptiveSizePolicy(init_eden_size,
 172                                         init_promo_size,
 173                                         init_survivor_size,
 174                                         max_gc_pause_sec,
 175                                         GCTimeRatio);
 176 }
 177 
 178 size_t GenCollectorPolicy::compute_max_alignment() {
 179   // The card marking array and the offset arrays for old generations are
 180   // committed in os pages as well. Make sure they are entirely full (to
 181   // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
 182   // byte entry and the os page size is 4096, the maximum heap size should
 183   // be 512*4096 = 2MB aligned.
 184   size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
 185 
 186   // Parallel GC does its own alignment of the generations to avoid requiring a
 187   // large page (256M on some platforms) for the permanent generation.  The
 188   // other collectors should also be updated to do their own alignment and then
 189   // this use of lcm() should be removed.
 190   if (UseLargePages && !UseParallelGC) {
 191       // in presence of large pages we have to make sure that our
 192       // alignment is large page aware
 193       alignment = lcm(os::large_page_size(), alignment);
 194   }
 195 
 196   assert(alignment >= min_alignment(), "Must be");
 197 
 198   return alignment;
 199 }
 200 
 201 void GenCollectorPolicy::initialize_flags() {
 202   // All sizes must be multiples of the generation granularity.
 203   set_min_alignment((uintx) Generation::GenGrain);
 204   set_max_alignment(compute_max_alignment());
 205 
 206   CollectorPolicy::initialize_flags();
 207 
 208   // All generational heaps have a youngest gen; handle those flags here.
 209 
 210   // Adjust max size parameters
 211   if (NewSize > MaxNewSize) {
 212     MaxNewSize = NewSize;
 213   }
 214   NewSize = align_size_down(NewSize, min_alignment());
 215   MaxNewSize = align_size_down(MaxNewSize, min_alignment());
 216 
 217   // Check validity of heap flags
 218   assert(NewSize     % min_alignment() == 0, "eden space alignment");
 219   assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");
 220 
 221   if (NewSize < 3*min_alignment()) {
 222      // make sure there room for eden and two survivor spaces
 223     vm_exit_during_initialization("Too small new size specified");
 224   }




 128   case GenRemSet::CardTable: {
 129     CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
 130     return res;
 131   }
 132   default:
 133     guarantee(false, "unrecognized GenRemSet::Name");
 134     return NULL;
 135   }
 136 }
 137 
 138 void CollectorPolicy::cleared_all_soft_refs() {
 139   // If near gc overhear limit, continue to clear SoftRefs.  SoftRefs may
 140   // have been cleared in the last collection but if the gc overhear
 141   // limit continues to be near, SoftRefs should still be cleared.
 142   if (size_policy() != NULL) {
 143     _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
 144   }
 145   _all_soft_refs_clear = true;
 146 }
 147 
 148 size_t CollectorPolicy::compute_largest_heap_alignment() {
 149   // The card marking array and the offset arrays for old generations are
 150   // committed in os pages as well. Make sure they are entirely full (to
 151   // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
 152   // byte entry and the os page size is 4096, the maximum heap size should
 153   // be 512*4096 = 2MB aligned.
 154 
 155   // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
 156   // is supported.
 157   // Requirements of any new remembered set implementations must be added here.
 158   size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
 159 
 160   // Parallel GC does its own alignment of the generations to avoid requiring a
 161   // large page (256M on some platforms) for the permanent generation.  The
 162   // other collectors should also be updated to do their own alignment and then
 163   // this use of lcm() should be removed.
 164   if (UseLargePages && !UseParallelGC) {
 165       // in presence of large pages we have to make sure that our
 166       // alignment is large page aware
 167       alignment = lcm(os::large_page_size(), alignment);
 168   }
 169 
 170   return alignment;
 171 }
 172 
 173 // GenCollectorPolicy methods.
 174 
 175 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
 176   size_t x = base_size / (NewRatio+1);
 177   size_t new_gen_size = x > min_alignment() ?
 178                      align_size_down(x, min_alignment()) :
 179                      min_alignment();
 180   return new_gen_size;
 181 }
 182 
 183 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
 184                                                  size_t maximum_size) {
 185   size_t alignment = min_alignment();
 186   size_t max_minus = maximum_size - alignment;
 187   return desired_size < max_minus ? desired_size : max_minus;
 188 }
 189 
 190 
 191 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
 192                                                 size_t init_promo_size,
 193                                                 size_t init_survivor_size) {
 194   const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
 195   _size_policy = new AdaptiveSizePolicy(init_eden_size,
 196                                         init_promo_size,
 197                                         init_survivor_size,
 198                                         max_gc_pause_sec,
 199                                         GCTimeRatio);
 200 }
 201 























 202 void GenCollectorPolicy::initialize_flags() {
 203   // All sizes must be multiples of the generation granularity.
 204   set_min_alignment((uintx) Generation::GenGrain);
 205   set_max_alignment(compute_largest_heap_alignment());
 206 
 207   CollectorPolicy::initialize_flags();
 208 
 209   // All generational heaps have a youngest gen; handle those flags here.
 210 
 211   // Adjust max size parameters
 212   if (NewSize > MaxNewSize) {
 213     MaxNewSize = NewSize;
 214   }
 215   NewSize = align_size_down(NewSize, min_alignment());
 216   MaxNewSize = align_size_down(MaxNewSize, min_alignment());
 217 
 218   // Check validity of heap flags
 219   assert(NewSize     % min_alignment() == 0, "eden space alignment");
 220   assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");
 221 
 222   if (NewSize < 3*min_alignment()) {
 223      // make sure there room for eden and two survivor spaces
 224     vm_exit_during_initialization("Too small new size specified");
 225   }