
src/share/vm/gc/serial/defNewGeneration.cpp

Old version (uses align_size_up / align_size_down):

 240                                                 bool clear_space,
 241                                                 bool mangle_space) {
 242   uintx alignment =
 243     GenCollectedHeap::heap()->collector_policy()->space_alignment();
 244 
 245   // If the spaces are being cleared (only done at heap initialization
 246   // currently), the survivor spaces need not be empty.
 247   // Otherwise, no care is taken for used areas in the survivor spaces
 248   // so check.
 249   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 250     "Initialization of the survivor spaces assumes these are empty");
 251 
 252   // Compute sizes
 253   uintx size = _virtual_space.committed_size();
 254   uintx survivor_size = compute_survivor_size(size, alignment);
 255   uintx eden_size = size - (2*survivor_size);
 256   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 257 
 258   if (eden_size < minimum_eden_size) {
 259     // May happen due to 64Kb rounding; if so, adjust eden size back up
 260     minimum_eden_size = align_size_up(minimum_eden_size, alignment);
 261     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
 262     uintx unaligned_survivor_size =
 263       align_size_down(maximum_survivor_size, alignment);
 264     survivor_size = MAX2(unaligned_survivor_size, alignment);
 265     eden_size = size - (2*survivor_size);
 266     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 267     assert(eden_size >= minimum_eden_size, "just checking");
 268   }
 269 
 270   char *eden_start = _virtual_space.low();
 271   char *from_start = eden_start + eden_size;
 272   char *to_start   = from_start + survivor_size;
 273   char *to_end     = to_start   + survivor_size;
 274 
 275   assert(to_end == _virtual_space.high(), "just checking");
 276   assert(Space::is_aligned(eden_start), "checking alignment");
 277   assert(Space::is_aligned(from_start), "checking alignment");
 278   assert(Space::is_aligned(to_start),   "checking alignment");
 279 
 280   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
 281   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
 282   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
 283 
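Aside: the partitioning above is simple enough to check in isolation. Below is a minimal standalone sketch (plain C++, not HotSpot code) of the same arithmetic. The compute_survivor_size stand-in here is hypothetical, carving one eighth of the committed size per survivor space, whereas the real policy derives the split from SurvivorRatio:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Power-of-two alignment helpers matching the names used above.
static size_t align_size_up(size_t x, size_t a)   { return (x + a - 1) & ~(a - 1); }
static size_t align_size_down(size_t x, size_t a) { return x & ~(a - 1); }

// Hypothetical stand-in: one eighth of the committed size per survivor
// space (the real policy derives the split from SurvivorRatio).
static size_t compute_survivor_size(size_t size, size_t alignment) {
  return std::max(align_size_down(size / 8, alignment), alignment);
}

int main() {
  const size_t alignment = 64 * 1024;          // 64Kb space alignment
  const size_t committed = 8 * 1024 * 1024;    // committed young-gen size
  size_t minimum_eden_size = 7 * 1024 * 1024;  // large on purpose, to force the fallback

  size_t survivor_size = compute_survivor_size(committed, alignment);
  size_t eden_size = committed - 2 * survivor_size;

  if (eden_size < minimum_eden_size) {
    // Same fallback as above: shrink the survivors until eden is big enough.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    size_t maximum_survivor_size = (committed - minimum_eden_size) / 2;
    survivor_size = std::max(align_size_down(maximum_survivor_size, alignment), alignment);
    eden_size = committed - 2 * survivor_size;
  }
  assert(eden_size >= minimum_eden_size && survivor_size <= eden_size);

  // eden | from | to are carved out contiguously, exactly as in the code above.
  printf("eden=%zuK from=%zuK to=%zuK\n",
         eden_size / 1024, survivor_size / 1024, survivor_size / 1024);
}

With these inputs the fallback fires: the initial split leaves a 6144Kb eden, below the 7168Kb minimum, so each survivor space shrinks from 1024Kb to 512Kb.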


 369 }
 370 
 371 size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
 372                                                     size_t new_size_before,
 373                                                     size_t alignment) const {
 374   size_t desired_new_size = new_size_before;
 375 
 376   if (NewSizeThreadIncrease > 0) {
 377     int threads_count;
 378     size_t thread_increase_size = 0;
 379 
 380     // 1. Check for overflow in 'threads_count * NewSizeThreadIncrease'.
 381     threads_count = Threads::number_of_non_daemon_threads();
 382     if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
 383       thread_increase_size = threads_count * NewSizeThreadIncrease;
 384 
 385       // 2. Check for overflow in 'new_size_candidate + thread_increase_size'.
 386       if (new_size_candidate <= max_uintx - thread_increase_size) {
 387         new_size_candidate += thread_increase_size;
 388 
 389         // 3. Check for overflow in 'align_size_up'.
 390         size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
 391         if (new_size_candidate <= aligned_max) {
 392           desired_new_size = align_size_up(new_size_candidate, alignment);
 393         }
 394       }
 395     }
 396   }
 397 
 398   return desired_new_size;
 399 }
 400 
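Aside: the three nested guards in adjust_for_thread_increase stop at the first step that would wrap around, leaving new_size_before untouched. A minimal sketch of the same pattern, with the VM globals (NewSizeThreadIncrease, the non-daemon thread count) replaced by plain parameters:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Same shape as adjust_for_thread_increase above, with the VM globals
// passed in as parameters so the guards can be exercised directly.
static size_t adjust_for_thread_increase(size_t new_size_candidate,
                                         size_t new_size_before,
                                         size_t alignment,
                                         int threads_count,
                                         size_t per_thread_increase) {
  const size_t max_size = SIZE_MAX;  // stands in for max_uintx
  size_t desired = new_size_before;

  if (per_thread_increase > 0 && threads_count > 0 &&
      per_thread_increase <= max_size / threads_count) {          // guard 1: multiply
    size_t thread_increase = threads_count * per_thread_increase;
    if (new_size_candidate <= max_size - thread_increase) {       // guard 2: add
      new_size_candidate += thread_increase;
      size_t aligned_max = (max_size - alignment) & ~(alignment - 1);
      if (new_size_candidate <= aligned_max) {                    // guard 3: align up
        desired = (new_size_candidate + alignment - 1) & ~(alignment - 1);
      }
    }
  }
  return desired;
}

int main() {
  // Normal case: 8 non-daemon threads, 4Kb per thread, 64Kb alignment.
  printf("%zu\n", adjust_for_thread_increase(1u << 20, 1u << 20, 64 * 1024, 8, 4096));
  // Overflow case: the guards fall back to the original size.
  printf("%zu\n", adjust_for_thread_increase(SIZE_MAX - 1, 1u << 20, 64 * 1024, 2, SIZE_MAX / 2));
}

The second call exercises guard 2: the multiplication survives, but adding the increase to the candidate would overflow, so the original size is returned unchanged.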
 401 void DefNewGeneration::compute_new_size() {
 402   // This is called after a GC that includes the old generation, so from-space
 403   // will normally be empty.
 404   // Note that we check both spaces, since if scavenge failed they revert roles.
 405   // If not, we bail out (otherwise we would have to relocate the objects).
 406   if (!from()->is_empty() || !to()->is_empty()) {
 407     return;
 408   }
 409 
 410   GenCollectedHeap* gch = GenCollectedHeap::heap();
 411 
 412   size_t old_size = gch->old_gen()->capacity();


New version (uses align_up / align_down):

 240                                                 bool clear_space,
 241                                                 bool mangle_space) {
 242   uintx alignment =
 243     GenCollectedHeap::heap()->collector_policy()->space_alignment();
 244 
 245   // If the spaces are being cleared (only done at heap initialization
 246   // currently), the survivor spaces need not be empty.
 247   // Otherwise, no care is taken for used areas in the survivor spaces
 248   // so check.
 249   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 250     "Initialization of the survivor spaces assumes these are empty");
 251 
 252   // Compute sizes
 253   uintx size = _virtual_space.committed_size();
 254   uintx survivor_size = compute_survivor_size(size, alignment);
 255   uintx eden_size = size - (2*survivor_size);
 256   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 257 
 258   if (eden_size < minimum_eden_size) {
 259     // May happen due to 64Kb rounding; if so, adjust eden size back up
 260     minimum_eden_size = align_up(minimum_eden_size, alignment);
 261     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
 262     uintx unaligned_survivor_size =
 263       align_down(maximum_survivor_size, alignment);
 264     survivor_size = MAX2(unaligned_survivor_size, alignment);
 265     eden_size = size - (2*survivor_size);
 266     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 267     assert(eden_size >= minimum_eden_size, "just checking");
 268   }
 269 
 270   char *eden_start = _virtual_space.low();
 271   char *from_start = eden_start + eden_size;
 272   char *to_start   = from_start + survivor_size;
 273   char *to_end     = to_start   + survivor_size;
 274 
 275   assert(to_end == _virtual_space.high(), "just checking");
 276   assert(Space::is_aligned(eden_start), "checking alignment");
 277   assert(Space::is_aligned(from_start), "checking alignment");
 278   assert(Space::is_aligned(to_start),   "checking alignment");
 279 
 280   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
 281   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
 282   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
 283 
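The only change visible in these hunks is the rename of align_size_up / align_size_down to align_up / align_down; the computation itself is untouched. For reference, a minimal sketch of the power-of-two alignment arithmetic both sets of names denote (the real helpers additionally assert the power-of-two precondition):

#include <cassert>
#include <cstddef>

// Power-of-two alignment arithmetic behind both the old and new names.
// Precondition: 'alignment' is a nonzero power of two.
static size_t align_up(size_t x, size_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}
static size_t align_down(size_t x, size_t alignment) {
  return x & ~(alignment - 1);
}

int main() {
  assert(align_up(65537, 65536) == 131072);    // rounds up to the next 64Kb boundary
  assert(align_up(65536, 65536) == 65536);     // already aligned: unchanged
  assert(align_down(131071, 65536) == 65536);  // rounds down to the previous boundary
}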


 369 }
 370 
 371 size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
 372                                                     size_t new_size_before,
 373                                                     size_t alignment) const {
 374   size_t desired_new_size = new_size_before;
 375 
 376   if (NewSizeThreadIncrease > 0) {
 377     int threads_count;
 378     size_t thread_increase_size = 0;
 379 
 380     // 1. Check for overflow in 'threads_count * NewSizeThreadIncrease'.
 381     threads_count = Threads::number_of_non_daemon_threads();
 382     if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
 383       thread_increase_size = threads_count * NewSizeThreadIncrease;
 384 
 385       // 2. Check for overflow in 'new_size_candidate + thread_increase_size'.
 386       if (new_size_candidate <= max_uintx - thread_increase_size) {
 387         new_size_candidate += thread_increase_size;
 388 
 389         // 3. Check for overflow in 'align_up'.
 390         size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
 391         if (new_size_candidate <= aligned_max) {
 392           desired_new_size = align_up(new_size_candidate, alignment);
 393         }
 394       }
 395     }
 396   }
 397 
 398   return desired_new_size;
 399 }
 400 
 401 void DefNewGeneration::compute_new_size() {
 402   // This is called after a GC that includes the old generation, so from-space
 403   // will normally be empty.
 404   // Note that we check both spaces, since if scavenge failed they revert roles.
 405   // If not, we bail out (otherwise we would have to relocate the objects).
 406   if (!from()->is_empty() || !to()->is_empty()) {
 407     return;
 408   }
 409 
 410   GenCollectedHeap* gch = GenCollectedHeap::heap();
 411 
 412   size_t old_size = gch->old_gen()->capacity();

