
share/gc/parallel/psOldGen.cpp

rev 1 : G1GC+POGC+NVDIMM patch, with the latest review comments from all reviewers incorporated.


old/share/gc/parallel/psOldGen.cpp

  55                    const char* perf_data_name, int level):
  56   _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  57   _max_gen_size(max_size)
  58 {}
  59 
  60 void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
  61                           const char* perf_data_name, int level) {
  62   initialize_virtual_space(rs, alignment);
  63   initialize_work(perf_data_name, level);
  64 
  65   // The old gen can grow to gen_size_limit().  _reserved reflects only
  66   // the current maximum that can be committed.
  67   assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
  68 
  69   initialize_performance_counters(perf_data_name, level);
  70 }
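
The comment above distinguishes the current reservation from the hard growth limit. Read as invariants (an illustration only, using PSVirtualSpace::committed_size() from this class's virtual space):

    // Reading the comment in initialize() as invariants:
    //   virtual_space()->committed_size() <= _reserved.byte_size()
    //   _reserved.byte_size()             <= gen_size_limit()
    // i.e. committed memory can grow up to the current reservation, and the
    // generation as a whole never exceeds gen_size_limit().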
  71 
  72 void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  73 
  74   _virtual_space = new PSVirtualSpace(rs, alignment);
  75   if (!_virtual_space->expand_by(_init_gen_size)) {
  76     vm_exit_during_initialization("Could not reserve enough space for "
  77                                   "object heap");
  78   }
  79 }
  80 
  81 void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  82   //
  83   // Basic memory initialization
  84   //
  85 
  86   MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
  87     heap_word_size(_max_gen_size));
  88   assert(limit_reserved.byte_size() == _max_gen_size,
  89     "word vs bytes confusion");
  90   //
  91   // Object start array initialization
  92   //
  93 
  94   start_array()->initialize(limit_reserved);
  95 
  96   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
  97                         (HeapWord*)virtual_space()->high_boundary());
  98 
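
The "word vs bytes confusion" assert checks that converting _max_gen_size to a word count and back is lossless, which holds only when the size is HeapWord-aligned. A minimal sketch of the round trip, assuming an 8-byte HeapWord (64-bit build):

    #include <cstddef>

    // Round trip checked by the assert, assuming an 8-byte HeapWord:
    bool round_trips(size_t max_gen_size) {
      const size_t HeapWordSize = 8;               // 64-bit build assumption
      size_t words = max_gen_size / HeapWordSize;  // heap_word_size() for aligned sizes
      size_t bytes = words * HeapWordSize;         // MemRegion::byte_size()
      return bytes == max_gen_size;                // true only if word-aligned
    }

Generation sizing guarantees the alignment via the alignment argument, so the assert should never fire in practice.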


 258     success = expand_by(aligned_expand_bytes);
 259   }
 260   if (!success) {
 261     success = expand_by(aligned_bytes);
 262   }
 263   if (!success) {
 264     success = expand_to_reserved();
 265   }
 266 
 267   if (success && GCLocker::is_active_and_needs_gc()) {
 268     log_debug(gc)("Garbage collection disabled, expanded heap instead");
 269   }
 270 }
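
The expansion path above degrades gracefully: it first tries the policy-preferred expansion size, falls back to just the aligned request, and finally commits whatever remains of the reservation. A standalone sketch of the same ladder; the commit callback is a hypothetical stand-in for expand_by(), used here only for illustration:

    #include <cstddef>

    // Three-step fallback: preferred growth, then the minimum aligned
    // request, then everything still reserved, stopping at first success.
    bool expand_with_fallback(bool (*commit)(size_t),
                              size_t preferred_bytes,
                              size_t minimum_bytes,
                              size_t remaining_reserved_bytes) {
      return commit(preferred_bytes)            // policy-preferred growth first
          || commit(minimum_bytes)              // then just the aligned request
          || commit(remaining_reserved_bytes);  // last resort: all that remains
    }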
 271 
 272 bool PSOldGen::expand_by(size_t bytes) {
 273   assert_lock_strong(ExpandHeap_lock);
 274   assert_locked_or_safepoint(Heap_lock);
 275   if (bytes == 0) {
 276     return true;  // That's what virtual_space()->expand_by(0) would return
 277   }
  278   bool result = virtual_space()->expand_by(bytes);
 279   if (result) {
 280     if (ZapUnusedHeapArea) {
  281       // We need to mangle the newly expanded area. The MemRegion spans
  282       // end -> new_end; we assume that top -> end is already mangled.
  283       // Do the mangling before post_resize() is called, because
  284       // the space is available for allocation after post_resize().
 285       HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
 286       assert(object_space()->end() < virtual_space_high,
 287         "Should be true before post_resize()");
 288       MemRegion mangle_region(object_space()->end(), virtual_space_high);
 289       // Note that the object space has not yet been updated to
 290       // coincide with the new underlying virtual space.
 291       SpaceMangler::mangle_region(mangle_region);
 292     }
 293     post_resize();
 294     if (UsePerfData) {
 295       _space_counters->update_capacity();
 296       _gen_counters->update_all();
 297     }
 298   }
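
ZapUnusedHeapArea fills freshly committed but not-yet-allocated memory with a recognizable pattern, so stale pointers into it stand out in a debugger or crash dump. A minimal sketch of the idea (not HotSpot's actual SpaceMangler; the zap value is illustrative):

    #include <cstdint>

    // Fill [start, end) with a distinctive pattern so any read of
    // uninitialized heap memory is easy to spot.
    static void mangle_region(uintptr_t* start, uintptr_t* end) {
      while (start < end) {
        *start++ = (uintptr_t)0xBAADBABE;  // illustrative zap value
      }
    }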




new/share/gc/parallel/psOldGen.cpp

  55                    const char* perf_data_name, int level):
  56   _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  57   _max_gen_size(max_size)
  58 {}
  59 
  60 void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
  61                           const char* perf_data_name, int level) {
  62   initialize_virtual_space(rs, alignment);
  63   initialize_work(perf_data_name, level);
  64 
  65   // The old gen can grow to gen_size_limit().  _reserved reflects only
  66   // the current maximum that can be committed.
  67   assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
  68 
  69   initialize_performance_counters(perf_data_name, level);
  70 }
  71 
  72 void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  73 
  74   _virtual_space = new PSVirtualSpace(rs, alignment);
  75   if (os::has_nvdimm() && UseParallelOldGC) {
  76     if (!_virtual_space->expand_by(_init_gen_size, _nvdimm_fd)) {
  77       vm_exit_during_initialization("Could not reserve enough space for "
  78                                     "object heap");
  79     }
  80 #if defined(_WINDOWS)
  81     // Windows does not support incremental mapping for a DAX (NVDIMM) file system.
  82     if (os::has_nvdimm()) {
  83       os::close(os::nvdimm_fd());
  84     }
  85 #endif
  86     os::set_nvdimm_heapbase((address)(_virtual_space->reserved_low_addr()));
  87     os::set_dram_heapbase((address)((char*)_virtual_space->reserved_low_addr() + _max_gen_size));
  88   } else {
  89     if (!_virtual_space->expand_by(_init_gen_size)) {
  90       vm_exit_during_initialization("Could not reserve enough space for "
  91                                     "object heap");
  92     }
  93   }
  94 }
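
The fd-taking expand_by() overload used above is not shown in this webrev. On POSIX systems, committing old-gen memory from a DAX (NVDIMM) file plausibly amounts to growing the backing file and mapping the new range at a fixed address inside the reservation; a hedged sketch under those assumptions:

    #include <sys/mman.h>
    #include <unistd.h>

    // Sketch only: grow the NVDIMM-backed file, then map the new range
    // read/write at a fixed offset inside the reserved region so stores
    // go straight to the DAX-backed file.
    static bool commit_from_nvdimm(char* reserved_base, size_t committed,
                                   size_t bytes, int fd) {
      if (ftruncate(fd, (off_t)(committed + bytes)) != 0) {
        return false;  // could not provide backing store
      }
      void* res = mmap(reserved_base + committed, bytes,
                       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
                       fd, (off_t)committed);
      return res != MAP_FAILED;
    }

Note also the address layout the patch sets up above: the old gen sits at the bottom of the reservation on NVDIMM, and the DRAM-backed part of the heap begins _max_gen_size bytes above it.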
  95 
  96 void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  97   //
  98   // Basic memory initialization
  99   //
 100 
 101   MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
 102     heap_word_size(_max_gen_size));
 103   assert(limit_reserved.byte_size() == _max_gen_size,
 104     "word vs bytes confusion");
 105   //
  106   // Object start array initialization
 107   //
 108 
 109   start_array()->initialize(limit_reserved);
 110 
 111   _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
 112                         (HeapWord*)virtual_space()->high_boundary());
 113 


 273     success = expand_by(aligned_expand_bytes);
 274   }
 275   if (!success) {
 276     success = expand_by(aligned_bytes);
 277   }
 278   if (!success) {
 279     success = expand_to_reserved();
 280   }
 281 
 282   if (success && GCLocker::is_active_and_needs_gc()) {
 283     log_debug(gc)("Garbage collection disabled, expanded heap instead");
 284   }
 285 }
 286 
 287 bool PSOldGen::expand_by(size_t bytes) {
 288   assert_lock_strong(ExpandHeap_lock);
 289   assert_locked_or_safepoint(Heap_lock);
 290   if (bytes == 0) {
 291     return true;  // That's what virtual_space()->expand_by(0) would return
 292   }
 293   bool result = false;
 294   if (os::has_nvdimm() && UseParallelOldGC) {
 295     result = virtual_space()->expand_by(bytes, nvdimm_fd());
 296   } else {
 297     result = virtual_space()->expand_by(bytes);
 298   }
 299   if (result) {
 300     if (ZapUnusedHeapArea) {
  301       // We need to mangle the newly expanded area. The MemRegion spans
  302       // end -> new_end; we assume that top -> end is already mangled.
  303       // Do the mangling before post_resize() is called, because
  304       // the space is available for allocation after post_resize().
 305       HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
 306       assert(object_space()->end() < virtual_space_high,
 307         "Should be true before post_resize()");
 308       MemRegion mangle_region(object_space()->end(), virtual_space_high);
 309       // Note that the object space has not yet been updated to
 310       // coincide with the new underlying virtual space.
 311       SpaceMangler::mangle_region(mangle_region);
 312     }
 313     post_resize();
 314     if (UsePerfData) {
 315       _space_counters->update_capacity();
 316       _gen_counters->update_all();
 317     }
 318   }

