src/share/vm/memory/genCollectedHeap.cpp


Old version:

  82     vm_exit_during_initialization("Failed necessary allocation.");
  83   }
  84   assert(policy != NULL, "Sanity check");
  85   _preloading_shared_classes = false;
  86 }
  87 
  88 jint GenCollectedHeap::initialize() {
  89   CollectedHeap::pre_initialize();
  90 
  91   int i;
  92   _n_gens = gen_policy()->number_of_generations();
  93 
  94   // While there are no constraints in the GC code that HeapWordSize
  95   // be any particular value, multiple other areas of the system
  96   // assume that HeapWordSize equals wordSize (e.g. oop->object_size
  97   // in some cases incorrectly returns the size in wordSize units
  98   // rather than HeapWordSize).
  99   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 100 
 101   // The heap must be at least as aligned as generations.
 102   size_t alignment = Generation::GenGrain;
 103 
 104   _gen_specs = gen_policy()->generations();
 105   PermanentGenerationSpec *perm_gen_spec =
 106                                 collector_policy()->permanent_generation();
 107 


 108   // Make sure the sizes are all aligned.
 109   for (i = 0; i < _n_gens; i++) {
 110     _gen_specs[i]->align(alignment);
 111   }
 112   perm_gen_spec->align(alignment);
 113 
 114   // If we are dumping the heap, then allocate a wasted block of address
 115   // space in order to push the heap to a lower address.  This extra
 116   // address range allows for other (or larger) libraries to be loaded
 117   // without them occupying the space required for the shared spaces.
 118 
 119   if (DumpSharedSpaces) {
 120     uintx reserved = 0;
 121     uintx block_size = 64*1024*1024;
 122     while (reserved < SharedDummyBlockSize) {
 123       char* dummy = os::reserve_memory(block_size);
 124       reserved += block_size;
 125     }
 126   }
 127 
 128   // Allocate space for the heap.
 129 
 130   char* heap_address;
 131   size_t total_reserved = 0;
 132   int n_covered_regions = 0;
 133   ReservedSpace heap_rs(0);
 134 
 135   heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
 136                           &n_covered_regions, &heap_rs);
 137 
 138   if (UseSharedSpaces) {
 139     if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
 140       if (heap_rs.is_reserved()) {
 141         heap_rs.release();
 142       }
 143       FileMapInfo* mapinfo = FileMapInfo::current_info();
 144       mapinfo->fail_continue("Unable to reserve shared region.");
 145       allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
 146                &heap_rs);
 147     }
 148   }
 149 
 150   if (!heap_rs.is_reserved()) {
 151     vm_shutdown_during_initialization(
 152       "Could not reserve enough space for object heap");
 153     return JNI_ENOMEM;
 154   }
 155 
 156   _reserved = MemRegion((HeapWord*)heap_rs.base(),
 157                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 158 
 159   // It is important to do this in a way such that concurrent readers can't
  160   // temporarily think something is in the heap.  (Seen this happen in asserts.)
 161   _reserved.set_word_size(0);
 162   _reserved.set_start((HeapWord*)heap_rs.base());
 163   size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
 164                                            - perm_gen_spec->misc_code_size();
 165   _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));


 190     bool success = create_cms_collector();
 191     if (!success) return JNI_ENOMEM;
 192   }
 193 #endif // SERIALGC
 194 
 195   return JNI_OK;
 196 }
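
In the UseSharedSpaces path above, initialize() reserves the heap, checks that the OS honored the base address required by the shared archive, and releases and retries when it did not. A minimal sketch of that reserve/verify/retry pattern, using hypothetical Reservation, try_reserve_at() and release() stand-ins rather than the real ReservedSpace API:

#include <cstdlib>
#include <cstddef>

// Hypothetical stand-ins (not HotSpot APIs): Reservation plays the role of
// ReservedSpace, try_reserve_at() the role of the reserving constructor.
struct Reservation {
  char*  base;
  size_t size;
  Reservation() : base(NULL), size(0) {}
  bool is_reserved() const { return base != NULL; }
};

// A real implementation would pass 'requested' as a hint to mmap or
// VirtualAlloc; this stub ignores it and takes any address.
static Reservation try_reserve_at(char* requested, size_t bytes) {
  (void)requested;
  Reservation r;
  r.base = static_cast<char*>(std::malloc(bytes));
  r.size = bytes;
  return r;
}

static void release(Reservation& r) {
  std::free(r.base);
  r.base = NULL;
  r.size = 0;
}

// Reserve 'bytes' at exactly 'required'; if the OS returns memory anywhere
// else, release it and fall back to an anonymous reservation, mirroring the
// release-and-retry in initialize() above.
static Reservation reserve_with_fallback(char* required, size_t bytes) {
  Reservation r = try_reserve_at(required, bytes);
  if (required != NULL && r.is_reserved() && r.base != required) {
    release(r);                       // wrong base: shared archive unusable
    r = try_reserve_at(NULL, bytes);  // any address will do now
  }
  return r;
}
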
 197 
 198 
 199 char* GenCollectedHeap::allocate(size_t alignment,
 200                                  PermanentGenerationSpec* perm_gen_spec,
 201                                  size_t* _total_reserved,
 202                                  int* _n_covered_regions,
 203                                  ReservedSpace* heap_rs){
 204   // Now figure out the total size.
 205   size_t total_reserved = 0;
 206   int n_covered_regions = 0;
 207   const size_t pageSize = UseLargePages ?
 208       os::large_page_size() : os::vm_page_size();
 209 


 210   for (int i = 0; i < _n_gens; i++) {
 211     total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
 212     n_covered_regions += _gen_specs[i]->n_covered_regions();
 213   }
 214 
 215   assert(total_reserved % pageSize == 0,
 216          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
 217                  SIZE_FORMAT, total_reserved, pageSize));
 218   total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
 219   assert(total_reserved % pageSize == 0,
 220          err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
 221                  SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
 222                  pageSize, perm_gen_spec->max_size()));
 223 
 224   n_covered_regions += perm_gen_spec->n_covered_regions();
 225 
 226   // Add the size of the data area which shares the same reserved area
 227   // as the heap, but which is not actually part of the heap.
 228   size_t misc = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
 229   total_reserved = add_and_check_overflow(total_reserved, misc);
 230 
 231   if (UseLargePages) {

 232     assert(total_reserved != 0, "total_reserved cannot be 0");

 233     total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
 234   }
 235 
 236   // Calculate the address at which the heap must reside in order for
 237   // the shared data to be at the required address.
 238 
 239   char* heap_address;
 240   if (UseSharedSpaces) {
 241 
 242     // Calculate the address of the first word beyond the heap.
 243     FileMapInfo* mapinfo = FileMapInfo::current_info();
 244     int lr = CompactingPermGenGen::n_regions - 1;
 245     size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
 246     heap_address = mapinfo->region_base(lr) + capacity;
 247 
 248     // Calculate the address of the first word of the heap.
 249     heap_address -= total_reserved;
 250   } else {
 251     heap_address = NULL;  // any address will do.
 252     if (UseCompressedOops) {
 253       heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 254       *_total_reserved = total_reserved;
 255       *_n_covered_regions = n_covered_regions;
 256       *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 257                                    UseLargePages, heap_address);
 258 
 259       if (heap_address != NULL && !heap_rs->is_reserved()) {
 260         // Failed to reserve at specified address - the requested memory
  261         // region is already taken, for example, by the 'java' launcher.
  262         // Try again to reserve the heap higher.
 263         heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
 264         *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 265                                      UseLargePages, heap_address);
 266 
 267         if (heap_address != NULL && !heap_rs->is_reserved()) {
 268           // Failed to reserve at specified address again - give up.
 269           heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
 270           assert(heap_address == NULL, "");
 271           *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 272                                        UseLargePages, heap_address);
 273         }
 274       }
 275       return heap_address;
 276     }
 277   }
 278 
 279   *_total_reserved = total_reserved;
 280   *_n_covered_regions = n_covered_regions;
 281   *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 282                                UseLargePages, heap_address);
 283 
 284   return heap_address;
 285 }
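
allocate() builds the total reservation size with add_and_check_overflow() and, when UseLargePages is set, rounds it to the large page size with round_up_and_check_overflow(), so a wrapped size_t can never silently produce an undersized reservation. The real helpers are defined elsewhere in HotSpot; a plausible sketch, with vm_exit_stub() standing in for the VM's error path:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for the VM's fatal-error path.
static void vm_exit_stub(const char* msg) {
  std::fprintf(stderr, "%s\n", msg);
  std::exit(1);
}

// Overflow-checked accumulation: unsigned addition wraps, and a wrapped sum
// is always smaller than either operand.
static size_t add_and_check_overflow(size_t base, size_t arg) {
  size_t sum = base + arg;
  if (sum < base) {
    vm_exit_stub("total reserved heap size overflows size_t");
  }
  return sum;
}

// Round 'base' up to a multiple of 'alignment' (assumed a power of two).
// If the addition before masking wraps, the result drops below 'base' and
// the same check fires.
static size_t round_up_and_check_overflow(size_t base, size_t alignment) {
  size_t rounded = (base + alignment - 1) & ~(alignment - 1);
  if (rounded < base) {
    vm_exit_stub("aligned heap size overflows size_t");
  }
  return rounded;
}
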
 286 
 287 
 288 void GenCollectedHeap::post_initialize() {
 289   SharedHeap::post_initialize();

New version:

  82     vm_exit_during_initialization("Failed necessary allocation.");
  83   }
  84   assert(policy != NULL, "Sanity check");
  85   _preloading_shared_classes = false;
  86 }
  87 
  88 jint GenCollectedHeap::initialize() {
  89   CollectedHeap::pre_initialize();
  90 
  91   int i;
  92   _n_gens = gen_policy()->number_of_generations();
  93 
  94   // While there are no constraints in the GC code that HeapWordSize
  95   // be any particular value, multiple other areas of the system
  96   // assume that HeapWordSize equals wordSize (e.g. oop->object_size
  97   // in some cases incorrectly returns the size in wordSize units
  98   // rather than HeapWordSize).
  99   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 100 
 101   // The heap must be at least as aligned as generations.
 102   size_t gen_alignment = Generation::GenGrain;
 103 
 104   _gen_specs = gen_policy()->generations();
 105   PermanentGenerationSpec *perm_gen_spec =
 106                                 collector_policy()->permanent_generation();
 107 
 108   size_t heap_alignment = collector_policy()->max_alignment();
 109 
 110   // Make sure the sizes are all aligned.
 111   for (i = 0; i < _n_gens; i++) {
 112     _gen_specs[i]->align(gen_alignment);
 113   }
 114   perm_gen_spec->align(heap_alignment);
 115 
 116   // If we are dumping the heap, then allocate a wasted block of address
 117   // space in order to push the heap to a lower address.  This extra
 118   // address range allows for other (or larger) libraries to be loaded
 119   // without them occupying the space required for the shared spaces.
 120 
 121   if (DumpSharedSpaces) {
 122     uintx reserved = 0;
 123     uintx block_size = 64*1024*1024;
 124     while (reserved < SharedDummyBlockSize) {
 125       char* dummy = os::reserve_memory(block_size);
 126       reserved += block_size;
 127     }
 128   }
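        // Illustrative note (not in the webrev): the reservations above are
        // deliberately kept for the rest of the dump -- 'dummy' is unused on
        // purpose so the low address range stays occupied. With a
        // hypothetical SharedDummyBlockSize of 512M, the loop would make
        // eight 64M reservations.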
 129 
 130   // Allocate space for the heap.
 131 
 132   char* heap_address;
 133   size_t total_reserved = 0;
 134   int n_covered_regions = 0;
 135   ReservedSpace heap_rs;
 136 
 137   heap_address = allocate(heap_alignment, perm_gen_spec, &total_reserved,
 138                           &n_covered_regions, &heap_rs);
 139 
 140   if (UseSharedSpaces) {
 141     if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
 142       if (heap_rs.is_reserved()) {
 143         heap_rs.release();
 144       }
 145       FileMapInfo* mapinfo = FileMapInfo::current_info();
 146       mapinfo->fail_continue("Unable to reserve shared region.");
 147       allocate(heap_alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
 148                &heap_rs);
 149     }
 150   }
 151 
 152   if (!heap_rs.is_reserved()) {
 153     vm_shutdown_during_initialization(
 154       "Could not reserve enough space for object heap");
 155     return JNI_ENOMEM;
 156   }
 157 
 158   _reserved = MemRegion((HeapWord*)heap_rs.base(),
 159                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 160 
 161   // It is important to do this in a way such that concurrent readers can't
  162   // temporarily think something is in the heap.  (Seen this happen in asserts.)
 163   _reserved.set_word_size(0);
 164   _reserved.set_start((HeapWord*)heap_rs.base());
 165   size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
 166                                            - perm_gen_spec->misc_code_size();
 167   _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));


 192     bool success = create_cms_collector();
 193     if (!success) return JNI_ENOMEM;
 194   }
 195 #endif // SERIALGC
 196 
 197   return JNI_OK;
 198 }
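
The _reserved update above zeroes the region's word size first, then moves its start, and only then publishes the new end, so a reader racing with the update can at worst observe an empty region, never one that appears to contain arbitrary addresses. An illustrative sketch with a simplified stand-in for MemRegion (volatile here only sketches the intended ordering; real concurrent code needs the VM's memory-ordering primitives):

#include <cstddef>

// Simplified stand-in for MemRegion, illustrative only.
struct SimpleMemRegion {
  char* volatile  _start;
  volatile size_t _word_size;

  // The kind of racy containment check the asserts mentioned above perform.
  bool contains(const char* p) const {
    char*  s = _start;
    size_t n = _word_size;
    return p >= s && p < s + n * sizeof(void*);
  }

  // Move the region without ever exposing a bogus non-empty state:
  void reset(char* new_start, size_t new_word_size) {
    _word_size = 0;              // 1) readers now see an empty region
    _start     = new_start;      // 2) safe to move: nothing is "in" it
    _word_size = new_word_size;  // 3) publish the new extent last
  }
};
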
 199 
 200 
 201 char* GenCollectedHeap::allocate(size_t alignment,
 202                                  PermanentGenerationSpec* perm_gen_spec,
 203                                  size_t* _total_reserved,
 204                                  int* _n_covered_regions,
 205                                  ReservedSpace* heap_rs){
 206   // Now figure out the total size.
 207   size_t total_reserved = 0;
 208   int n_covered_regions = 0;
 209   const size_t pageSize = UseLargePages ?
 210       os::large_page_size() : os::vm_page_size();
 211 
 212   assert(alignment % pageSize == 0, "Must be");
 213 
 214   for (int i = 0; i < _n_gens; i++) {
 215     total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
 216     n_covered_regions += _gen_specs[i]->n_covered_regions();
 217   }
 218 
 219   assert(total_reserved % alignment == 0,
 220          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
 221                  SIZE_FORMAT, total_reserved, alignment));
 222   total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
 223   assert(total_reserved % alignment == 0,
 224          err_msg("Perm size; total_reserved=" SIZE_FORMAT ", alignment="
 225                  SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
 226                  alignment, perm_gen_spec->max_size()));
 227 
 228   n_covered_regions += perm_gen_spec->n_covered_regions();
 229 
 230   // Add the size of the data area which shares the same reserved area
 231   // as the heap, but which is not actually part of the heap.
 232   size_t misc = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
 233   total_reserved = add_and_check_overflow(total_reserved, misc);
 234 
 235   if (UseLargePages) {
 236     assert(misc == 0, "CDS does not support Large Pages");
 237     assert(total_reserved != 0, "total_reserved cannot be 0");
 238     assert(is_size_aligned(total_reserved, os::large_page_size()), "Must be");
 239     total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
 240   }
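        // Illustrative arithmetic (not in the webrev): with a 2M large page
        // size, round_up_and_check_overflow(500*M, 2*M) returns 500*M
        // unchanged, while 500*M + 64K rounds up to 502*M. The misc == 0
        // assert above encodes that the CDS misc data/code areas and large
        // pages are never used together.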
 241 
 242   // Calculate the address at which the heap must reside in order for
 243   // the shared data to be at the required address.
 244 
 245   char* heap_address;
 246   if (UseSharedSpaces) {
 247 
 248     // Calculate the address of the first word beyond the heap.
 249     FileMapInfo* mapinfo = FileMapInfo::current_info();
 250     int lr = CompactingPermGenGen::n_regions - 1;
 251     size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
 252     heap_address = mapinfo->region_base(lr) + capacity;
 253 
 254     // Calculate the address of the first word of the heap.
 255     heap_address -= total_reserved;
 256   } else {
 257     heap_address = NULL;  // any address will do.
 258     if (UseCompressedOops) {
 259       heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
 260       *_total_reserved = total_reserved;
 261       *_n_covered_regions = n_covered_regions;
 262       *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 263                                    UseLargePages, heap_address);
 264 
 265       if (heap_address != NULL && !heap_rs->is_reserved()) {
 266         // Failed to reserve at specified address - the requested memory
  267         // region is already taken, for example, by the 'java' launcher.
  268         // Try again to reserve the heap higher.
 269         heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
 270         *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 271                                      UseLargePages, heap_address);
 272 
 273         if (heap_address != NULL && !heap_rs->is_reserved()) {
 274           // Failed to reserve at specified address again - give up.
 275           heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
 276           assert(heap_address == NULL, "");
 277           *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 278                                        UseLargePages, heap_address);
 279         }
 280       }
 281       return heap_address;
 282     }
 283   }
 284 
 285   *_total_reserved = total_reserved;
 286   *_n_covered_regions = n_covered_regions;
 287   *heap_rs = ReservedHeapSpace(total_reserved, alignment,
 288                                UseLargePages, heap_address);
 289 
 290   return heap_address;
 291 }
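
With compressed oops, allocate() tries three placements in order: a base where narrow oops decode with neither base nor shift (UnscaledNarrowOop), then one needing only a shift (ZeroBasedNarrowOop), and finally an unconstrained placement needing both (HeapBasedNarrowOop). A hypothetical analogue of the mode-dependent base computation (the real Universe::preferred_heap_base() also accounts for alignment and platform details; names and constants below are illustrative, 64-bit assumed):

#include <cstdint>
#include <cstddef>

enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };

const unsigned NarrowOopShiftSketch = 3;                  // log2 of 8-byte object alignment
const uint64_t UnscaledOopHeapMax   = uint64_t(1) << 32;  // 4G: oop == narrow oop
const uint64_t ZeroBasedOopHeapMax  = UnscaledOopHeapMax << NarrowOopShiftSketch;  // 32G

// Highest base that keeps the whole heap addressable in 'mode', or NULL
// when any address works.
char* preferred_heap_base_sketch(size_t heap_size, NarrowOopMode mode) {
  uint64_t limit = 0;
  switch (mode) {
    case UnscaledNarrowOop:  limit = UnscaledOopHeapMax;  break;  // heap ends below 4G
    case ZeroBasedNarrowOop: limit = ZeroBasedOopHeapMax; break;  // heap ends below 32G
    case HeapBasedNarrowOop: return NULL;  // any address; decode uses base+shift
  }
  if (heap_size > limit) {
    return NULL;  // this mode cannot cover the heap; caller falls through
  }
  // Highest base that still keeps the heap inside the mode's limit.
  return reinterpret_cast<char*>(static_cast<uintptr_t>(limit - heap_size));
}
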
 292 
 293 
 294 void GenCollectedHeap::post_initialize() {
 295   SharedHeap::post_initialize();

