
src/share/vm/gc/parallel/psOldGen.cpp

rev 12906 : [mq]: gc_interface

--- old version ---

  93 
  94   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  95                         (HeapWord*)virtual_space()->high_boundary());
  96 
  97   //
  98   // Card table stuff
  99   //
 100 
 101   MemRegion cmr((HeapWord*)virtual_space()->low(),
 102                 (HeapWord*)virtual_space()->high());
 103   if (ZapUnusedHeapArea) {
 104     // Mangle newly committed space immediately, rather than waiting
 105     // for the initialization of the space, even though mangling is
 106     // logically a space concern. Doing it here eliminates the need to
 107     // carry along a flag saying that a complete mangling (bottom to
 108     // end) still needs to be done.
 109     SpaceMangler::mangle_region(cmr);
 110   }
 111 
 112   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 113   BarrierSet* bs = heap->barrier_set();
 114 
 115   bs->resize_covered_region(cmr);
 116 
 117   CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
 118 
 119   // Verify that the start and end of this generation are card aligned.
 120   // If they were not, a single card could span more than one generation,
 121   // which would cause problems when we commit/uncommit memory, and when we
 122   // clear and dirty cards.
 123   guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
 124   if (_reserved.end() != heap->reserved_region().end()) {
 125     // Don't check at the very end of the heap; the card table asserts
 126     // if we probe off the end of its covered region.
 127     guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
 128   }
 129 
 130   //
 131   // ObjectSpace stuff
 132   //
 133 
 134   _object_space = new MutableSpace(virtual_space()->alignment());
 135 
 136   if (_object_space == NULL)
 137     vm_exit_during_initialization("Could not allocate an old gen space");
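
(Review note: for readers new to this code, here is a minimal standalone sketch of what the SpaceMangler::mangle_region call above amounts to. The function and types are simplified stand-ins, not the real API; the fill value is meant to echo HotSpot's 0xBAADBABE "bad heap word" debug pattern.)

    #include <cstdint>
    #include <cstddef>

    // Fill freshly committed, not-yet-used memory with a recognizable junk
    // value so that any read of uninitialized heap memory stands out.
    static void mangle_region_sketch(uintptr_t* start, size_t word_count) {
      for (size_t i = 0; i < word_count; i++) {
        start[i] = static_cast<uintptr_t>(0xBAADBABEu);
      }
    }

    int main() {
      uintptr_t chunk[8];
      mangle_region_sketch(chunk, 8);  // e.g. a newly committed region
      return 0;
    }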


 368     shrink(change_bytes);
 369   }
 370 
 371   log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
 372                       ParallelScavengeHeap::heap()->total_collections(),
 373                       size_before,
 374                       virtual_space()->committed_size());
 375 }
 376 
 377 // NOTE! We need to be careful about resizing. During a GC, multiple
 378 // allocators may be active during heap expansion. If we allow the
 379 // heap resizing to become visible before we have correctly resized
 380 // all heap-related data structures, we may cause program failures.
 381 void PSOldGen::post_resize() {
 382   // First construct a MemRegion representing the new size.
 383   MemRegion new_memregion((HeapWord*)virtual_space()->low(),
 384     (HeapWord*)virtual_space()->high());
 385   size_t new_word_size = new_memregion.word_size();
 386 
 387   start_array()->set_covered_region(new_memregion);
 388   ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
 389 
 390   // ALWAYS do this last!!
 391   object_space()->initialize(new_memregion,
 392                              SpaceDecorator::DontClear,
 393                              SpaceDecorator::DontMangle);
 394 
 395   assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
 396     "Sanity");
 397 }
 398 
 399 size_t PSOldGen::gen_size_limit() {
 400   return _max_gen_size;
 401 }
 402 
 403 void PSOldGen::reset_after_change() {
 404   ShouldNotReachHere();
 405   return;
 406 }
 407 
 408 size_t PSOldGen::available_for_expansion() {
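
(Review note: the "ALWAYS do this last!!" ordering in post_resize above is the load-bearing detail. Here is a sketch of that discipline with invented, simplified types: if the space published its larger end() before the start array and card table had grown their covered regions, a concurrent allocator could place an object that the side tables do not yet cover.)

    #include <cstddef>

    struct SideTableSketch {               // stands in for start array / card table
      size_t covered_words = 0;
      void set_covered_region(size_t words) { covered_words = words; }
    };

    struct MutableSpaceSketch {
      size_t end_words = 0;                // allocators read this bound
      void publish_end(size_t words) { end_words = words; }
    };

    void post_resize_sketch(SideTableSketch& start_array,
                            SideTableSketch& card_table,
                            MutableSpaceSketch& space,
                            size_t new_words) {
      start_array.set_covered_region(new_words);  // grow side tables first...
      card_table.set_covered_region(new_words);
      space.publish_end(new_words);               // ...publish the new size last
    }

    int main() {
      SideTableSketch sa, ct;
      MutableSpaceSketch sp;
      post_resize_sketch(sa, ct, sp, 1024);
      return 0;
    }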

+++ new version (rev 12906: gc_interface) +++

  93 
  94   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  95                         (HeapWord*)virtual_space()->high_boundary());
  96 
  97   //
  98   // Card table stuff
  99   //
 100 
 101   MemRegion cmr((HeapWord*)virtual_space()->low(),
 102                 (HeapWord*)virtual_space()->high());
 103   if (ZapUnusedHeapArea) {
 104     // Mangle newly committed space immediately, rather than waiting
 105     // for the initialization of the space, even though mangling is
 106     // logically a space concern. Doing it here eliminates the need to
 107     // carry along a flag saying that a complete mangling (bottom to
 108     // end) still needs to be done.
 109     SpaceMangler::mangle_region(cmr);
 110   }
 111 
 112   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 113   PSCardTable* ct = heap->card_table();
 114 
 115   ct->resize_covered_region(cmr);
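
(Review note: this hunk is the heart of the gc_interface change in this file. The old code fetched heap->barrier_set() and needed a barrier_set_cast<CardTableModRefBS> before it could touch the card table; the new code asks the heap for its PSCardTable directly. A simplified sketch of the new access pattern, with stand-in types rather than the real HotSpot classes:)

    struct CardTableSketch {
      void resize_covered_region(/* MemRegion */) {}
      bool is_card_aligned(const void*) const { return true; }
    };

    struct HeapSketch {
      CardTableSketch _ct;
      // New route: the heap hands out its card table directly, so callers
      // no longer downcast a generic BarrierSet* to reach it.
      CardTableSketch* card_table() { return &_ct; }
    };

    int main() {
      HeapSketch heap;
      heap.card_table()->resize_covered_region();  // cast-free access path
      return 0;
    }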


 116 
 117   // Verify that the start and end of this generation are card aligned.
 118   // If they were not, a single card could span more than one generation,
 119   // which would cause problems when we commit/uncommit memory, and when we
 120   // clear and dirty cards.
 121   guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
 122   if (_reserved.end() != heap->reserved_region().end()) {
 123     // Don't check at the very end of the heap; the card table asserts
 124     // if we probe off the end of its covered region.
 125     guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
 126   }
 127 
 128   //
 129   // ObjectSpace stuff
 130   //
 131 
 132   _object_space = new MutableSpace(virtual_space()->alignment());
 133 
 134   if (_object_space == NULL)
 135     vm_exit_during_initialization("Could not allocate an old gen space");
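
(Review note: a standalone sketch of what the is_card_aligned guarantees above check, assuming HotSpot's usual 512-byte cards (card_shift == 9); the helper below is an illustration, not the PSCardTable API.)

    #include <cstdint>
    #include <cassert>

    static const uintptr_t card_size = 512;  // 1 << card_shift

    static bool is_card_aligned_sketch(const void* p) {
      return (reinterpret_cast<uintptr_t>(p) & (card_size - 1)) == 0;
    }

    int main() {
      // A card-aligned generation boundary: every card lies entirely inside
      // one generation.
      assert(is_card_aligned_sketch(reinterpret_cast<void*>(0x10000000)));
      // An unaligned boundary would let one card straddle two generations, so
      // clearing or dirtying that card from one side would touch its neighbor.
      assert(!is_card_aligned_sketch(reinterpret_cast<void*>(0x10000008)));
      return 0;
    }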


 366     shrink(change_bytes);
 367   }
 368 
 369   log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: collection: %d (" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
 370                       ParallelScavengeHeap::heap()->total_collections(),
 371                       size_before,
 372                       virtual_space()->committed_size());
 373 }
 374 
 375 // NOTE! We need to be careful about resizing. During a GC, multiple
 376 // allocators may be active during heap expansion. If we allow the
 377 // heap resizing to become visible before we have correctly resized
 378 // all heap-related data structures, we may cause program failures.
 379 void PSOldGen::post_resize() {
 380   // First construct a MemRegion representing the new size.
 381   MemRegion new_memregion((HeapWord*)virtual_space()->low(),
 382     (HeapWord*)virtual_space()->high());
 383   size_t new_word_size = new_memregion.word_size();
 384 
 385   start_array()->set_covered_region(new_memregion);
 386   ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);
 387 
 388   // ALWAYS do this last!!
 389   object_space()->initialize(new_memregion,
 390                              SpaceDecorator::DontClear,
 391                              SpaceDecorator::DontMangle);
 392 
 393   assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
 394     "Sanity");
 395 }
 396 
 397 size_t PSOldGen::gen_size_limit() {
 398   return _max_gen_size;
 399 }
 400 
 401 void PSOldGen::reset_after_change() {
 402   ShouldNotReachHere();
 403   return;
 404 }
 405 
 406 size_t PSOldGen::available_for_expansion() {

