
src/share/vm/gc/parallel/psOldGen.cpp

This page shows the old and new versions of the changed hunks; the patch replaces the align_size_up/align_size_down helpers with align_up/align_down.

Old version (align_size_*):


 212   if (GCExpandToAllocateDelayMillis > 0) {
 213     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
 214   }
 215   return allocate_noexpand(word_size);
 216 }
 217 
 218 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
 219   expand(word_size*HeapWordSize);
 220   if (GCExpandToAllocateDelayMillis > 0) {
 221     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
 222   }
 223   return cas_allocate_noexpand(word_size);
 224 }
 225 
 226 void PSOldGen::expand(size_t bytes) {
 227   if (bytes == 0) {
 228     return;
 229   }
 230   MutexLocker x(ExpandHeap_lock);
 231   const size_t alignment = virtual_space()->alignment();
 232   size_t aligned_bytes  = align_size_up(bytes, alignment);
 233   size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
 234 
 235   if (UseNUMA) {
 236     // With NUMA we use round-robin page allocation for the old gen. Expand by at least
 237     // enough to provide a page per lgroup. Alignment is larger than or equal to the page size.
 238     aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
 239   }
 240   if (aligned_bytes == 0) {
 241     // The alignment caused the number of bytes to wrap.  An expand_by(0) will
 242     // return true with the implication that an expansion was done when it
 243     // was not.  A call to expand implies a best effort to expand by "bytes"
 244     // but not a guarantee.  Align down to give a best effort.  This is likely
 245     // the most that the generation can expand since it has some capacity to
 246     // start with.
 247     aligned_bytes = align_size_down(bytes, alignment);
 248   }
 249 
 250   bool success = false;
 251   if (aligned_expand_bytes > aligned_bytes) {
 252     success = expand_by(aligned_expand_bytes);
 253   }
 254   if (!success) {
 255     success = expand_by(aligned_bytes);
 256   }
 257   if (!success) {
 258     success = expand_to_reserved();
 259   }
 260 
 261   if (success && GCLocker::is_active_and_needs_gc()) {
 262     log_debug(gc)("Garbage collection disabled, expanded heap instead");
 263   }
 264 }
 265 
 266 bool PSOldGen::expand_by(size_t bytes) {
 267   assert_lock_strong(ExpandHeap_lock);


 301   return result;
 302 }
 303 
 304 bool PSOldGen::expand_to_reserved() {
 305   assert_lock_strong(ExpandHeap_lock);
 306   assert_locked_or_safepoint(Heap_lock);
 307 
 308   bool result = true;
 309   const size_t remaining_bytes = virtual_space()->uncommitted_size();
 310   if (remaining_bytes > 0) {
 311     result = expand_by(remaining_bytes);
 312     DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
 313   }
 314   return result;
 315 }
 316 
 317 void PSOldGen::shrink(size_t bytes) {
 318   assert_lock_strong(ExpandHeap_lock);
 319   assert_locked_or_safepoint(Heap_lock);
 320 
 321   size_t size = align_size_down(bytes, virtual_space()->alignment());
 322   if (size > 0) {
 323     // Shrink by the aligned size so the committed boundary stays aligned.
 324     virtual_space()->shrink_by(size);
 325     post_resize();
 326 
 327     size_t new_mem_size = virtual_space()->committed_size();
 328     size_t old_mem_size = new_mem_size + size;
 329     log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
 330                   name(), old_mem_size/K, size/K, new_mem_size/K);
 331   }
 332 }
 333 
 334 void PSOldGen::resize(size_t desired_free_space) {
 335   const size_t alignment = virtual_space()->alignment();
 336   const size_t size_before = virtual_space()->committed_size();
 337   size_t new_size = used_in_bytes() + desired_free_space;
 338   if (new_size < used_in_bytes()) {
 339     // Overflowed the addition.
 340     new_size = gen_size_limit();
 341   }
 342   // Adjust according to our min and max
 343   new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());
 344 
 345   assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
 346   new_size = align_size_up(new_size, alignment);
 347 
 348   const size_t current_size = capacity_in_bytes();
 349 
 350   log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
 351     "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
 352     " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
 353     " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
 354     desired_free_space, used_in_bytes(), new_size, current_size,
 355     gen_size_limit(), min_gen_size());
 356 
 357   if (new_size == current_size) {
 358     // No change requested
 359     return;
 360   }
 361   if (new_size > current_size) {
 362     size_t change_bytes = new_size - current_size;
 363     expand(change_bytes);
 364   } else {
 365     size_t change_bytes = current_size - new_size;
 366     // shrink doesn't grab this lock, expand does. Is that right?


New version (align_*):

 212   if (GCExpandToAllocateDelayMillis > 0) {
 213     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
 214   }
 215   return allocate_noexpand(word_size);
 216 }
 217 
 218 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
 219   expand(word_size*HeapWordSize);
 220   if (GCExpandToAllocateDelayMillis > 0) {
 221     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
 222   }
 223   return cas_allocate_noexpand(word_size);
 224 }
 225 
 226 void PSOldGen::expand(size_t bytes) {
 227   if (bytes == 0) {
 228     return;
 229   }
 230   MutexLocker x(ExpandHeap_lock);
 231   const size_t alignment = virtual_space()->alignment();
 232   size_t aligned_bytes  = align_up(bytes, alignment);
 233   size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
 234 
 235   if (UseNUMA) {
 236     // With NUMA we use round-robin page allocation for the old gen. Expand by at least
 237     // enough to provide a page per lgroup. Alignment is larger than or equal to the page size.
 238     aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
 239   }
 240   if (aligned_bytes == 0) {
 241     // The alignment caused the number of bytes to wrap.  An expand_by(0) will
 242     // return true with the implication that an expansion was done when it
 243     // was not.  A call to expand implies a best effort to expand by "bytes"
 244     // but not a guarantee.  Align down to give a best effort.  This is likely
 245     // the most that the generation can expand since it has some capacity to
 246     // start with.
 247     aligned_bytes = align_down(bytes, alignment);
 248   }
 249 
 250   bool success = false;
 251   if (aligned_expand_bytes > aligned_bytes) {
 252     success = expand_by(aligned_expand_bytes);
 253   }
 254   if (!success) {
 255     success = expand_by(aligned_bytes);
 256   }
 257   if (!success) {
 258     success = expand_to_reserved();
 259   }
 260 
 261   if (success && GCLocker::is_active_and_needs_gc()) {
 262     log_debug(gc)("Garbage collection disabled, expanded heap instead");
 263   }
 264 }
 265 
 266 bool PSOldGen::expand_by(size_t bytes) {
 267   assert_lock_strong(ExpandHeap_lock);


 301   return result;
 302 }
 303 
 304 bool PSOldGen::expand_to_reserved() {
 305   assert_lock_strong(ExpandHeap_lock);
 306   assert_locked_or_safepoint(Heap_lock);
 307 
 308   bool result = true;
 309   const size_t remaining_bytes = virtual_space()->uncommitted_size();
 310   if (remaining_bytes > 0) {
 311     result = expand_by(remaining_bytes);
 312     DEBUG_ONLY(if (!result) log_warning(gc)("grow to reserve failed"));
 313   }
 314   return result;
 315 }
 316 
 317 void PSOldGen::shrink(size_t bytes) {
 318   assert_lock_strong(ExpandHeap_lock);
 319   assert_locked_or_safepoint(Heap_lock);
 320 
 321   size_t size = align_down(bytes, virtual_space()->alignment());
 322   if (size > 0) {
 323     // Shrink by the aligned size so the committed boundary stays aligned.
 324     virtual_space()->shrink_by(size);
 325     post_resize();
 326 
 327     size_t new_mem_size = virtual_space()->committed_size();
 328     size_t old_mem_size = new_mem_size + size;
 329     log_debug(gc)("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
 330                   name(), old_mem_size/K, size/K, new_mem_size/K);
 331   }
 332 }
 333 
 334 void PSOldGen::resize(size_t desired_free_space) {
 335   const size_t alignment = virtual_space()->alignment();
 336   const size_t size_before = virtual_space()->committed_size();
 337   size_t new_size = used_in_bytes() + desired_free_space;
 338   if (new_size < used_in_bytes()) {
 339     // Overflowed the addition.
 340     new_size = gen_size_limit();
 341   }
 342   // Adjust according to our min and max
 343   new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());
 344 
 345   assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
 346   new_size = align_up(new_size, alignment);
 347 
 348   const size_t current_size = capacity_in_bytes();
 349 
 350   log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: "
 351     "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
 352     " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
 353     " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
 354     desired_free_space, used_in_bytes(), new_size, current_size,
 355     gen_size_limit(), min_gen_size());
 356 
 357   if (new_size == current_size) {
 358     // No change requested
 359     return;
 360   }
 361   if (new_size > current_size) {
 362     size_t change_bytes = new_size - current_size;
 363     expand(change_bytes);
 364   } else {
 365     size_t change_bytes = current_size - new_size;
 366     // shrink doesn't grab this lock, expand does. Is that right?
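
A note on the helpers this change moves to: the "wrap" case described in the comment in expand() comes from rounding a size near SIZE_MAX upward. Below is a minimal sketch of the usual power-of-two rounding idiom; HotSpot's real align_up/align_down live in utilities/align.hpp and add type and argument checking, so the *_sketch names here are hypothetical stand-ins, not the actual API.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins for align_up/align_down; both assume a
// power-of-two alignment, as HotSpot's helpers do.
static size_t align_up_sketch(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0);       // power of two
  return (size + alignment - 1) & ~(alignment - 1); // round up
}

static size_t align_down_sketch(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0);       // power of two
  return size & ~(alignment - 1);                   // round down
}

int main() {
  // The case expand() guards against: rounding up near SIZE_MAX wraps
  // to 0, which is why aligned_bytes == 0 falls back to align_down.
  size_t near_max = (size_t)0 - 100;
  printf("%zu\n", align_up_sketch(near_max, 4096));   // prints 0 (wrapped)
  printf("%zu\n", align_down_sketch(near_max, 4096)); // largest aligned value below
  return 0;
}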

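resize() uses the standard unsigned-overflow test: for size_t values, a + b wraps around exactly when the sum is smaller than either operand. A minimal sketch of the clamping logic (function and parameter names are illustrative, not HotSpot API):

#include <cstddef>

// If used + desired_free wraps past SIZE_MAX, the sum is smaller than
// "used"; in that case fall back to the generation's size limit, as
// resize() does with gen_size_limit().
static size_t clamped_new_size(size_t used, size_t desired_free, size_t limit) {
  size_t new_size = used + desired_free;
  if (new_size < used) { // the addition overflowed
    new_size = limit;
  }
  return new_size;
}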

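On the NUMA path, the MAX2 in expand() guarantees at least one alignment-sized chunk per lgroup. As a worked example with hypothetical numbers: with a 2M space alignment and os::numa_get_groups_num() reporting 4 lgroups, aligned_expand_bytes is raised to at least 8M, so the old gen's round-robin page allocation can give every lgroup a piece of the newly committed range.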