src/share/vm/memory/defNewGeneration.cpp

--- old/src/share/vm/memory/defNewGeneration.cpp
+++ new/src/share/vm/memory/defNewGeneration.cpp
@@ -187,72 +187,72 @@
     _should_allocate_from_space(false)
 {
   MemRegion cmr((HeapWord*)_virtual_space.low(),
                 (HeapWord*)_virtual_space.high());
   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 
   if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
     _eden_space = new ConcEdenSpace(this);
   } else {
     _eden_space = new EdenSpace(this);
   }
   _from_space = new ContiguousSpace();
   _to_space   = new ContiguousSpace();
 
   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
     vm_exit_during_initialization("Could not allocate a new gen space");
 
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
   // These values are exported as performance counters.
-  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
+  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
   uintx size = _virtual_space.reserved_size();
   _max_survivor_size = compute_survivor_size(size, alignment);
   _max_eden_size = size - (2*_max_survivor_size);
 
   // allocate the performance counters
 
   // Generation counters -- generation 0, 3 subspaces
   _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
   _gc_counters = new CollectorCounters(policy, 0);
 
   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                       _gen_counters);
   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                       _gen_counters);
   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                     _gen_counters);
 
   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   update_counters();
   _next_gen = NULL;
   _tenuring_threshold = MaxTenuringThreshold;
   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 
   _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 }
 
 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                 bool clear_space,
                                                 bool mangle_space) {
   uintx alignment =
-    GenCollectedHeap::heap()->collector_policy()->min_alignment();
+    GenCollectedHeap::heap()->collector_policy()->space_alignment();
 
   // If the spaces are being cleared (only done at heap initialization
   // currently), the survivor spaces need not be empty.
   // Otherwise, no care is taken for used areas in the survivor spaces
   // so check.
   assert(clear_space || (to()->is_empty() && from()->is_empty()),
     "Initialization of the survivor spaces assumes these are empty");
 
   // Compute sizes
   uintx size = _virtual_space.committed_size();
   uintx survivor_size = compute_survivor_size(size, alignment);
   uintx eden_size = size - (2*survivor_size);
   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 
   if (eden_size < minimum_eden_size) {
     // May happen due to 64Kb rounding, if so adjust eden size back up
     minimum_eden_size = align_size_up(minimum_eden_size, alignment);
     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
     uintx unaligned_survivor_size =
       align_size_down(maximum_survivor_size, alignment);
@@ -456,41 +456,41 @@
 
 
 size_t DefNewGeneration::capacity() const {
   return eden()->capacity()
        + from()->capacity();  // to() is only used during scavenge
 }
 
 
 size_t DefNewGeneration::used() const {
   return eden()->used()
        + from()->used();      // to() is only used during scavenge
 }
 
 
 size_t DefNewGeneration::free() const {
   return eden()->free()
        + from()->free();      // to() is only used during scavenge
 }
 
 size_t DefNewGeneration::max_capacity() const {
-  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
+  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
   const size_t reserved_bytes = reserved().byte_size();
   return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
 }
 
 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
   return eden()->free();
 }
 
 size_t DefNewGeneration::capacity_before_gc() const {
   return eden()->capacity();
 }
 
 size_t DefNewGeneration::contiguous_available() const {
   return eden()->free();
 }
 
 
 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
 

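Note on the first hunk: the alignment used by the sizing arithmetic now comes from collector_policy()->space_alignment() instead of min_alignment(); the derived quantities (_max_survivor_size, _max_eden_size) are computed the same way. compute_survivor_size() itself is declared in defNewGeneration.hpp and is not part of this diff; the following is a minimal standalone sketch of that sizing arithmetic, assuming the SurvivorRatio-based split this generation uses. The helper names are illustrative, not the VM's.

    // Minimal sketch of the young-generation sizing arithmetic, assuming a
    // SurvivorRatio-based split; helper names are illustrative, not HotSpot's.
    #include <cstddef>

    // Round down to a power-of-two alignment (stand-in for align_size_down).
    static size_t align_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);
    }

    // Analogue of compute_survivor_size(): each survivor space gets one
    // (SurvivorRatio + 2)-th of the generation, aligned down, and never
    // less than one alignment unit.
    static size_t survivor_size(size_t gen_size, size_t survivor_ratio,
                                size_t alignment) {
      size_t n = gen_size / (survivor_ratio + 2);
      return n > alignment ? align_down(n, alignment) : alignment;
    }

    // Eden is whatever the two survivor spaces leave behind, exactly as in
    // the constructor: _max_eden_size = size - (2 * _max_survivor_size).
    static size_t eden_size(size_t gen_size, size_t survivor) {
      return gen_size - 2 * survivor;
    }

With a 64M generation, SurvivorRatio = 8, and 64K alignment, this gives survivor spaces of 6528K each and an eden of 51.25M; the assert in compute_space_boundaries() (eden_size > 0 && survivor_size <= eden_size) holds for any SurvivorRatio >= 1.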

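The eden_size < minimum_eden_size branch at the end of the first hunk handles the case where alignment-granularity rounding (the "64Kb rounding" in the comment) leaves eden below a caller-supplied minimum: the minimum is aligned up, and the two survivor spaces split what remains, aligned down. Below is a sketch of that re-adjustment under the same assumptions as above, again with illustrative free functions rather than the member code; the webrev view truncates the hunk at line 258, and the recomputation of eden from the shrunken survivor size presumably follows in the elided lines.

    // Sketch of the minimum-eden re-adjustment; names are illustrative.
    #include <cstddef>

    static size_t align_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);
    }
    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    // If ratio-based sizing left eden too small, grant eden its (aligned)
    // minimum and split the remainder evenly between the two survivors.
    static void adjust_for_minimum_eden(size_t committed, size_t alignment,
                                        size_t minimum_eden,
                                        size_t* eden, size_t* survivor) {
      if (*eden >= minimum_eden) return;  // ratio-based sizing was fine
      minimum_eden = align_up(minimum_eden, alignment);
      size_t maximum_survivor = (committed - minimum_eden) / 2;
      *survivor = align_down(maximum_survivor, alignment);
      *eden = committed - 2 * (*survivor);  // eden grows back accordingly
    }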

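On the accounting methods in the second hunk: capacity(), used(), and free() sum eden and from only, because to() holds objects only while a scavenge is copying them; by the same logic, max_capacity() is the reservation minus a single survivor space, since eden plus exactly one survivor is the most the mutator can ever use. A tiny self-contained illustration with made-up numbers:

    // Made-up numbers: 64M reserved young gen, 6M per survivor space.
    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t reserved = 64 * M;
      const size_t survivor = 6 * M;
      const size_t eden     = reserved - 2 * survivor;  // 52M
      // max_capacity() analogue: everything but one survivor space,
      // i.e. eden + one survivor = 58M usable between scavenges.
      std::printf("max capacity = %zuM\n", (reserved - survivor) / M);
      // capacity()/used()/free() analogues would likewise sum eden and the
      // current from-space and ignore to-space.
      std::printf("eden = %zuM\n", eden / M);
      return 0;
    }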


