
src/share/vm/gc/serial/defNewGeneration.cpp

 368   // This is called after a GC that includes the old generation, so from-space
 369   // will normally be empty.
 370   // Note that we check both spaces, since if scavenge failed they revert roles.
 371   // If not we bail out (otherwise we would have to relocate the objects).
 372   if (!from()->is_empty() || !to()->is_empty()) {
 373     return;
 374   }
 375 
 376   GenCollectedHeap* gch = GenCollectedHeap::heap();
 377 
 378   size_t old_size = gch->old_gen()->capacity();
 379   size_t new_size_before = _virtual_space.committed_size();
 380   size_t min_new_size = initial_size();
 381   size_t max_new_size = reserved().byte_size();
 382   assert(min_new_size <= new_size_before &&
 383          new_size_before <= max_new_size,
 384          "just checking");
 385   // All space sizes must be multiples of Generation::GenGrain.
 386   size_t alignment = Generation::GenGrain;
 387 
 388   // Compute desired new generation size based on NewRatio and
 389   // NewSizeThreadIncrease
 390   size_t desired_new_size = old_size/NewRatio;
 391   int threads_count = Threads::number_of_non_daemon_threads();
 392   size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
 393   desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
 394 
 395   // Adjust new generation size
 396   desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
 397   assert(desired_new_size <= max_new_size, "just checking");
 398 
 399   bool changed = false;
 400   if (desired_new_size > new_size_before) {
 401     size_t change = desired_new_size - new_size_before;
 402     assert(change % alignment == 0, "just checking");
 403     if (expand(change)) {
 404        changed = true;
 405     }
 406     // If the heap failed to expand to the desired size,
 407     // "changed" will be false.  If the expansion failed
 408     // (and at this point it was expected to succeed),
 409     // ignore the failure (leaving "changed" as false).
 410   }
 411   if (desired_new_size < new_size_before && eden()->is_empty()) {
 412     // bail out of shrinking if objects in eden
 413     size_t change = new_size_before - desired_new_size;
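
The excerpt above computes desired_new_size from old_size/NewRatio plus 'threads_count * NewSizeThreadIncrease' with no overflow checks, so a pathologically large -XX:NewSizeThreadIncrease value can make the intermediate arithmetic wrap around. A minimal standalone sketch of that failure mode, using plain size_t in place of the HotSpot uintx types and illustrative values that are not taken from the change itself:

// Standalone illustration, not HotSpot code: the unchecked product
// 'threads_count * NewSizeThreadIncrease' wraps modulo 2^N for a very
// large flag value, yielding a nonsensical thread_increase_size.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  size_t threads_count            = 4;                 // illustrative thread count
  size_t new_size_thread_increase = SIZE_MAX / 4 + 1;  // pathologically large flag value

  // Unchecked multiplication, as in the excerpt above: wraps to 0 here.
  size_t thread_increase_size = threads_count * new_size_thread_increase;

  std::printf("thread_increase_size = %zu\n", thread_increase_size);  // prints 0
  return 0;
}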




 368   // This is called after a GC that includes the old generation, so from-space
 369   // will normally be empty.
 370   // Note that we check both spaces, since if scavenge failed they revert roles.
 371   // If not we bail out (otherwise we would have to relocate the objects).
 372   if (!from()->is_empty() || !to()->is_empty()) {
 373     return;
 374   }
 375 
 376   GenCollectedHeap* gch = GenCollectedHeap::heap();
 377 
 378   size_t old_size = gch->old_gen()->capacity();
 379   size_t new_size_before = _virtual_space.committed_size();
 380   size_t min_new_size = initial_size();
 381   size_t max_new_size = reserved().byte_size();
 382   assert(min_new_size <= new_size_before &&
 383          new_size_before <= max_new_size,
 384          "just checking");
 385   // All space sizes must be multiples of Generation::GenGrain.
 386   size_t alignment = Generation::GenGrain;
 387 
 388   // To revert previous value when an overflow happens.
 389   size_t desired_new_size = new_size_before;
 390 
 391   int threads_count = 0;
 392   size_t thread_increase_size = 0;
 393 
 394   // Compute desired new generation size based on NewRatio and NewSizeThreadIncrease
 395   if (NewSizeThreadIncrease > 0) {
 396     size_t new_size_candidate = old_size / NewRatio;
 397 
 398     // 1. Check an overflow at 'threads_count * NewSizeThreadIncrease'.
 399     threads_count = Threads::number_of_non_daemon_threads();
 400     if (NewSizeThreadIncrease <= max_uintx / threads_count) {
 401       thread_increase_size = threads_count * NewSizeThreadIncrease;
 402 
 403       // 2. Check an overflow at 'new_size_candidate + thread_increase_size'.
 404       if (new_size_candidate <= max_uintx - thread_increase_size) {
 405         new_size_candidate += thread_increase_size;
 406 
 407         // 3. Check an overflow at 'align_size_up'.
 408         size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
 409         if (new_size_candidate <= aligned_max) {
 410           desired_new_size = align_size_up(new_size_candidate, alignment);
 411         }
 412       }
 413     }
 414   }
 415 
 416   // Adjust new generation size
 417   desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
 418   assert(desired_new_size <= max_new_size, "just checking");
 419 
 420   bool changed = false;
 421   if (desired_new_size > new_size_before) {
 422     size_t change = desired_new_size - new_size_before;
 423     assert(change % alignment == 0, "just checking");
 424     if (expand(change)) {
 425        changed = true;
 426     }
 427     // If the heap failed to expand to the desired size,
 428     // "changed" will be false.  If the expansion failed
 429     // (and at this point it was expected to succeed),
 430     // ignore the failure (leaving "changed" as false).
 431   }
 432   if (desired_new_size < new_size_before && eden()->is_empty()) {
 433     // bail out of shrinking if objects in eden
 434     size_t change = new_size_before - desired_new_size;
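
The excerpt above guards each step of that computation: the multiplication (step 1), the addition (step 2) and the align-up (step 3), and leaves desired_new_size at new_size_before when any step would overflow. A standalone sketch of the same guard pattern, with max_uintx and align_size_up replaced by plain size_t equivalents; the function and parameter names here (compute_desired_new_size, align_up, fallback) are illustrative and not part of the change:

// Standalone sketch, not HotSpot code: mirrors the three overflow checks
// in the excerpt above. new_ratio and alignment are assumed non-zero,
// with alignment a power of two.
#include <cstddef>
#include <cstdint>

static size_t align_up(size_t value, size_t alignment) {
  // Round up to a multiple of 'alignment'; caller guarantees no overflow.
  return (value + alignment - 1) & ~(alignment - 1);
}

static size_t compute_desired_new_size(size_t old_size,
                                       size_t new_ratio,
                                       size_t threads_count,
                                       size_t new_size_thread_increase,
                                       size_t alignment,
                                       size_t fallback) {
  size_t desired = fallback;  // revert to the previous value if an overflow happens
  if (new_size_thread_increase > 0 && threads_count > 0) {
    size_t candidate = old_size / new_ratio;

    // 1. 'threads_count * new_size_thread_increase' must not overflow.
    if (new_size_thread_increase <= SIZE_MAX / threads_count) {
      size_t thread_increase = threads_count * new_size_thread_increase;

      // 2. 'candidate + thread_increase' must not overflow.
      if (candidate <= SIZE_MAX - thread_increase) {
        candidate += thread_increase;

        // 3. Aligning 'candidate' up must not overflow.
        size_t aligned_max = (SIZE_MAX - alignment) & ~(alignment - 1);
        if (candidate <= aligned_max) {
          desired = align_up(candidate, alignment);
        }
      }
    }
  }
  return desired;
}

With the values from the earlier sketch (threads_count = 4, new_size_thread_increase = SIZE_MAX / 4 + 1), check 1 fails and the function simply returns fallback, matching the behavior of the excerpt above.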

