src/share/vm/memory/genCollectedHeap.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hsx-rt.8007074 Sdiff src/share/vm/memory

src/share/vm/memory/genCollectedHeap.cpp

Print this page




  78       !_gen_process_strong_tasks->valid()) {
  79     vm_exit_during_initialization("Failed necessary allocation.");
  80   }
  81   assert(policy != NULL, "Sanity check");
  82 }
  83 
// (OLD version, pre-8007074) Initialize the generational heap: align every
// generation spec to Generation::GenGrain, reserve the backing memory via
// allocate(), then create the remembered set and barrier set.
// Returns JNI_OK on success, JNI_ENOMEM if the heap reservation fails.
  84 jint GenCollectedHeap::initialize() {
  85   CollectedHeap::pre_initialize();
  86 
  87   int i;
  88   _n_gens = gen_policy()->number_of_generations();
  89 
  90   // While there are no constraints in the GC code that HeapWordSize
  91   // be any particular value, there are multiple other areas in the
  92   // system which believe this to be true (e.g. oop->object_size in some
  93   // cases incorrectly returns the size in wordSize units rather than
  94   // HeapWordSize).
  95   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  96 
  97   // The heap must be at least as aligned as generations.
// In this old version the same GenGrain value is used both to align the
// generation specs and (below) as the reservation alignment passed to
// allocate() — the new version separates these two concerns.
  98   size_t alignment = Generation::GenGrain;
  99 
 100   _gen_specs = gen_policy()->generations();
 101 
 102   // Make sure the sizes are all aligned.
 103   for (i = 0; i < _n_gens; i++) {
 104     _gen_specs[i]->align(alignment);
 105   }
 106 
 107   // Allocate space for the heap.
 108 
 109   char* heap_address;
 110   size_t total_reserved = 0;
 111   int n_covered_regions = 0;
 112   ReservedSpace heap_rs(0);
 113 
// allocate() fills in total_reserved, n_covered_regions and heap_rs as
// out-parameters; heap_address is its return value (heap_rs.base()).
 114   heap_address = allocate(alignment, &total_reserved,


 115                           &n_covered_regions, &heap_rs);
 116 
 117   if (!heap_rs.is_reserved()) {
 118     vm_shutdown_during_initialization(
 119       "Could not reserve enough space for object heap");
 120     return JNI_ENOMEM;
 121   }
 122 
 123   _reserved = MemRegion((HeapWord*)heap_rs.base(),
 124                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 125 
 126   // It is important to do this in a way such that concurrent readers can't
 127   // temporarily think somethings in the heap.  (Seen this happen in asserts.)
 128   _reserved.set_word_size(0);
 129   _reserved.set_start((HeapWord*)heap_rs.base());
 130   size_t actual_heap_size = heap_rs.size();
 131   _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
 132 
 133   _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
 134   set_barrier_set(rem_set()->bs());
// NOTE(review): the webrev elides original lines 135-150 here (presumably the
// per-generation setup and INCLUDE_ALL_GCS-guarded code whose closing brace
// appears below) — not visible in this view; confirm against the full file.


 151   }
 152 #endif // INCLUDE_ALL_GCS
 153 
 154   return JNI_OK;
 155 }
 156 
 157 
// (OLD version, pre-8007074) Compute the total reservation size and the
// card-table covered-region count across all generations, then reserve the
// heap.  Results are returned through the out-parameters _total_reserved,
// _n_covered_regions and heap_rs; the return value is heap_rs->base()
// (NULL-checkable by the caller via heap_rs->is_reserved()).
 158 char* GenCollectedHeap::allocate(size_t alignment,
 159                                  size_t* _total_reserved,
 160                                  int* _n_covered_regions,
 161                                  ReservedSpace* heap_rs){
 162   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
 163     "the maximum representable size";
 164 
 165   // Now figure out the total size.
 166   size_t total_reserved = 0;
 167   int n_covered_regions = 0;
 168   const size_t pageSize = UseLargePages ?
 169       os::large_page_size() : os::vm_page_size();
 170 


 171   for (int i = 0; i < _n_gens; i++) {
 172     total_reserved += _gen_specs[i]->max_size();
// Unsigned wraparound check: if the sum became smaller than the last
// addend, the size_t addition overflowed.
 173     if (total_reserved < _gen_specs[i]->max_size()) {
 174       vm_exit_during_initialization(overflow_msg);
 175     }
 176     n_covered_regions += _gen_specs[i]->n_covered_regions();
 177   }
// NOTE(review): when UseLargePages, this assert checks large-page alignment
// *before* the round_to below — it assumes the summed gen sizes are already
// large-page aligned, yet the later rounding suggests they may not be.  The
// new version of this function restructures this; confirm intent there.
 178   assert(total_reserved % pageSize == 0,
 179          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
 180                  SIZE_FORMAT, total_reserved, pageSize));
 181 
 182   // Needed until the cardtable is fixed to have the right number
 183   // of covered regions.
 184   n_covered_regions += 2;
 185 
 186   if (UseLargePages) {
 187     assert(total_reserved != 0, "total_reserved cannot be 0");
// Round the reservation up to a large-page boundary; if the rounded value
// wrapped around to something below one large page, that is overflow too.
 188     total_reserved = round_to(total_reserved, os::large_page_size());
 189     if (total_reserved < os::large_page_size()) {
 190       vm_exit_during_initialization(overflow_msg);
 191     }
 192   }
 193 
 194       *_total_reserved = total_reserved;
 195       *_n_covered_regions = n_covered_regions;

 196   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 197   return heap_rs->base();
 198 }
 199 
 200 
 201 void GenCollectedHeap::post_initialize() {
 202   SharedHeap::post_initialize();
 203   TwoGenerationCollectorPolicy *policy =
 204     (TwoGenerationCollectorPolicy *)collector_policy();
 205   guarantee(policy->is_two_generation_policy(), "Illegal policy type");
 206   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
 207   assert(def_new_gen->kind() == Generation::DefNew ||
 208          def_new_gen->kind() == Generation::ParNew ||
 209          def_new_gen->kind() == Generation::ASParNew,
 210          "Wrong generation kind");
 211 
 212   Generation* old_gen = get_gen(1);
 213   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
 214          old_gen->kind() == Generation::ASConcurrentMarkSweep ||
 215          old_gen->kind() == Generation::MarkSweepCompact,




  78       !_gen_process_strong_tasks->valid()) {
  79     vm_exit_during_initialization("Failed necessary allocation.");
  80   }
  81   assert(policy != NULL, "Sanity check");
  82 }
  83 
// (NEW version, 8007074) Initialize the generational heap.  Generation specs
// are still aligned to Generation::GenGrain, but the heap reservation itself
// now uses the collector policy's max_alignment(), decoupling reservation
// alignment from generation-grain alignment.
// Returns JNI_OK on success, JNI_ENOMEM if the heap reservation fails.
  84 jint GenCollectedHeap::initialize() {
  85   CollectedHeap::pre_initialize();
  86 
  87   int i;
  88   _n_gens = gen_policy()->number_of_generations();
  89 
  90   // While there are no constraints in the GC code that HeapWordSize
  91   // be any particular value, there are multiple other areas in the
  92   // system which believe this to be true (e.g. oop->object_size in some
  93   // cases incorrectly returns the size in wordSize units rather than
  94   // HeapWordSize).
  95   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  96 
  97   // The heap must be at least as aligned as generations.
// Renamed from plain 'alignment': this value only governs generation-spec
// alignment now, not the heap reservation.
  98   size_t gen_alignment = Generation::GenGrain;
  99 
 100   _gen_specs = gen_policy()->generations();
 101 
 102   // Make sure the sizes are all aligned.
 103   for (i = 0; i < _n_gens; i++) {
 104     _gen_specs[i]->align(gen_alignment);
 105   }
 106 
 107   // Allocate space for the heap.
 108 
 109   char* heap_address;
 110   size_t total_reserved = 0;
 111   int n_covered_regions = 0;
// Default-constructed (was ReservedSpace heap_rs(0) in the old version).
 112   ReservedSpace heap_rs;
 113 
// Reservation alignment now comes from the collector policy, which allocate()
// asserts is a multiple of the (large) page size.
 114   size_t heap_alignment = collector_policy()->max_alignment();
 115 
// allocate() fills in total_reserved, n_covered_regions and heap_rs as
// out-parameters; heap_address is its return value (heap_rs.base()).
 116   heap_address = allocate(heap_alignment, &total_reserved,
 117                           &n_covered_regions, &heap_rs);
 118 
 119   if (!heap_rs.is_reserved()) {
 120     vm_shutdown_during_initialization(
 121       "Could not reserve enough space for object heap");
 122     return JNI_ENOMEM;
 123   }
 124 
 125   _reserved = MemRegion((HeapWord*)heap_rs.base(),
 126                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 127 
 128   // It is important to do this in a way such that concurrent readers can't
 129   // temporarily think somethings in the heap.  (Seen this happen in asserts.)
 130   _reserved.set_word_size(0);
 131   _reserved.set_start((HeapWord*)heap_rs.base());
 132   size_t actual_heap_size = heap_rs.size();
 133   _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
 134 
 135   _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
 136   set_barrier_set(rem_set()->bs());
// NOTE(review): the webrev elides original lines 137-152 here (presumably the
// per-generation setup and INCLUDE_ALL_GCS-guarded code whose closing brace
// appears below) — not visible in this view; confirm against the full file.


 153   }
 154 #endif // INCLUDE_ALL_GCS
 155 
 156   return JNI_OK;
 157 }
 158 
 159 
// (NEW version, 8007074) Compute the total reservation size and the
// card-table covered-region count across all generations, then reserve the
// heap.  'alignment' is the heap reservation alignment (the caller's
// collector-policy max_alignment()); it is asserted to be a multiple of the
// (large) page size, which lets this version drop the old explicit
// large-page round_to.  Results come back through the out-parameters
// _total_reserved, _n_covered_regions and heap_rs; the return value is
// heap_rs->base().
 160 char* GenCollectedHeap::allocate(size_t alignment,
 161                                  size_t* _total_reserved,
 162                                  int* _n_covered_regions,
 163                                  ReservedSpace* heap_rs){
 164   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
 165     "the maximum representable size";
 166 
 167   // Now figure out the total size.
 168   size_t total_reserved = 0;
 169   int n_covered_regions = 0;
 170   const size_t pageSize = UseLargePages ?
 171       os::large_page_size() : os::vm_page_size();
 172 
// The requested alignment must already absorb the page size, so no separate
// page rounding is needed below.
 173   assert(alignment % pageSize == 0, "Must be");
 174 
 175   for (int i = 0; i < _n_gens; i++) {
 176     total_reserved += _gen_specs[i]->max_size();
// Unsigned wraparound check: if the sum became smaller than the last
// addend, the size_t addition overflowed.
 177     if (total_reserved < _gen_specs[i]->max_size()) {
 178       vm_exit_during_initialization(overflow_msg);
 179     }
 180     n_covered_regions += _gen_specs[i]->n_covered_regions();
 181   }
// Stronger than the old pageSize check: the summed gen sizes must already be
// aligned to the full heap alignment.
 182   assert(total_reserved % alignment == 0,
 183          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
 184                  SIZE_FORMAT, total_reserved, alignment));
 185 
 186   // Needed until the cardtable is fixed to have the right number
 187   // of covered regions.
 188   n_covered_regions += 2;
 189 
// (The old UseLargePages round_to block that lived here was removed by this
// change; alignment handling is now the caller's responsibility.)








 190   *_total_reserved = total_reserved;
 191   *_n_covered_regions = n_covered_regions;
 192 
 193   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
 194   return heap_rs->base();
 195 }
 196 
 197 
 198 void GenCollectedHeap::post_initialize() {
 199   SharedHeap::post_initialize();
 200   TwoGenerationCollectorPolicy *policy =
 201     (TwoGenerationCollectorPolicy *)collector_policy();
 202   guarantee(policy->is_two_generation_policy(), "Illegal policy type");
 203   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
 204   assert(def_new_gen->kind() == Generation::DefNew ||
 205          def_new_gen->kind() == Generation::ParNew ||
 206          def_new_gen->kind() == Generation::ASParNew,
 207          "Wrong generation kind");
 208 
 209   Generation* old_gen = get_gen(1);
 210   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
 211          old_gen->kind() == Generation::ASConcurrentMarkSweep ||
 212          old_gen->kind() == Generation::MarkSweepCompact,


src/share/vm/memory/genCollectedHeap.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File