< prev index next >

src/hotspot/share/memory/metaspaceShared.cpp

Print this page




 217   FileMapInfo* mapinfo = new FileMapInfo();
 218 
 219   // Open the shared archive file, read and validate the header. If
 220   // initialization fails, shared spaces [UseSharedSpaces] are
 221   // disabled and the file is closed.
 222   // Map in spaces now also
 223   if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
 224     size_t cds_total = core_spaces_size();
 225     cds_address = (address)mapinfo->header()->region_addr(0);
 226 #ifdef _LP64
 227     if (Metaspace::using_class_space()) {
 228       char* cds_end = (char*)(cds_address + cds_total);
 229       cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
 230       // If UseCompressedClassPointers is set then allocate the metaspace area
 231       // above the heap and above the CDS area (if it exists).
 232       Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
 233       // map_heap_regions() compares the current narrow oop and klass encodings
 234       // with the archived ones, so it must be done after all encodings are determined.
 235       mapinfo->map_heap_regions();
 236     }



 237 #endif // _LP64
 238   } else {
 239     assert(!mapinfo->is_open() && !UseSharedSpaces,
 240            "archive file not closed or shared spaces not disabled.");
 241   }
 242 }
 243 
 244 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
       // Dump-time (-Xshare:dump) setup: reserve one contiguous region and split it
       // into the CDS archive space (_shared_rs) and a temporary compressed class
       // space, then fix the narrow-klass encoding used while writing the archive.
 245   assert(DumpSharedSpaces, "should be called for dump time only");
 246   const size_t reserve_alignment = Metaspace::reserve_alignment();
 247   bool large_pages = false; // No large pages when dumping the CDS archive.
 248   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 249 
 250 #ifdef _LP64
 251   // On 64-bit VM, the heap and class space layout will be the same as if
 252   // you're running in -Xshare:on mode:
 253   //
 254   //                              +-- SharedBaseAddress (default = 0x800000000)
 255   //                              v
 256   // +-..---------+---------+ ... +----+----+----+----+----+---------------+


 282   // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
 283   // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
 284   //   will store Klasses into this space.
 285   // + The lower 3 GB is used for the archive -- when preload_classes() is done,
 286   //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
 287   //   then the RO parts.
 288 
 289   assert(UseCompressedOops && UseCompressedClassPointers,
 290       "UseCompressedOops and UseCompressedClassPointers must be set");
 291 
       // Split the reservation: lower 3/4 (aligned down) -> archive, remainder ->
       // temporary class space. NOTE(review): cds_total is computed in lines
       // elided from this view -- presumably UnscaledClassSpaceMax (4 GB) per the
       // comment above; confirm against the full source.
 292   size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
 293   ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
 294   CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
 295   _shared_rs = _shared_rs.first_part(max_archive_size);
 296 
 297   // Set up compressed class pointers: encode relative to the archive base so
 297   // dumped narrow-klass values are valid at run time.
 298   Universe::set_narrow_klass_base((address)_shared_rs.base());
 299   // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
 300   // with AOT.
 301   Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);




 302 
 303   Metaspace::initialize_class_space(tmp_class_space);
 304   tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
 305                 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
 306 
 307   tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
 308                 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
 309 #endif
 310 
 311   // Start with 0 committed bytes. The memory will be committed as needed by
 312   // MetaspaceShared::commit_shared_space_to().
 313   if (!_shared_vs.initialize(_shared_rs, 0)) {
 314     vm_exit_during_initialization("Unable to allocate memory for shared space");
 315   }
 316 
 317   _mc_region.init(&_shared_rs);
 318   tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
 319                 _shared_rs.size(), p2i(_shared_rs.base()));
 320 }
 321 




 217   FileMapInfo* mapinfo = new FileMapInfo();
 218 
 219   // Open the shared archive file, read and validate the header. If
 220   // initialization fails, shared spaces [UseSharedSpaces] are
 221   // disabled and the file is closed.
 222   // Map in spaces now also
 223   if (mapinfo->initialize() && map_shared_spaces(mapinfo)) {
 224     size_t cds_total = core_spaces_size();
 225     cds_address = (address)mapinfo->header()->region_addr(0);
 226 #ifdef _LP64
 227     if (Metaspace::using_class_space()) {
 228       char* cds_end = (char*)(cds_address + cds_total);
 229       cds_end = (char *)align_up(cds_end, Metaspace::reserve_alignment());
 230       // If UseCompressedClassPointers is set then allocate the metaspace area
 231       // above the heap and above the CDS area (if it exists).
 232       Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
 233       // map_heap_regions() compares the current narrow oop and klass encodings
 234       // with the archived ones, so it must be done after all encodings are determined.
 235       mapinfo->map_heap_regions();
 236     }
 237 #ifdef AARCH64
 238     Universe::set_narrow_klass_range(CompressedClassSpaceSize);
 239 #endif
 240 #endif // _LP64
 241   } else {
 242     assert(!mapinfo->is_open() && !UseSharedSpaces,
 243            "archive file not closed or shared spaces not disabled.");
 244   }
 245 }
 246 
 247 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
       // Dump-time (-Xshare:dump) setup: reserve one contiguous region and split it
       // into the CDS archive space (_shared_rs) and a temporary compressed class
       // space, then fix the narrow-klass encoding used while writing the archive.
 248   assert(DumpSharedSpaces, "should be called for dump time only");
 249   const size_t reserve_alignment = Metaspace::reserve_alignment();
 250   bool large_pages = false; // No large pages when dumping the CDS archive.
 251   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 252 
 253 #ifdef _LP64
 254   // On 64-bit VM, the heap and class space layout will be the same as if
 255   // you're running in -Xshare:on mode:
 256   //
 257   //                              +-- SharedBaseAddress (default = 0x800000000)
 258   //                              v
 259   // +-..---------+---------+ ... +----+----+----+----+----+---------------+


 285   // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
 286   // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
 287   //   will store Klasses into this space.
 288   // + The lower 3 GB is used for the archive -- when preload_classes() is done,
 289   //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
 290   //   then the RO parts.
 291 
 292   assert(UseCompressedOops && UseCompressedClassPointers,
 293       "UseCompressedOops and UseCompressedClassPointers must be set");
 294 
       // Split the reservation: lower 3/4 (aligned down) -> archive, remainder ->
       // temporary class space. NOTE(review): cds_total is computed in lines
       // elided from this view -- presumably UnscaledClassSpaceMax (4 GB) per the
       // comment above; confirm against the full source.
 295   size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
 296   ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
 297   CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
 298   _shared_rs = _shared_rs.first_part(max_archive_size);
 299 
 300   // Set up compressed class pointers: encode relative to the archive base so
 300   // dumped narrow-klass values are valid at run time.
 301   Universe::set_narrow_klass_base((address)_shared_rs.base());
 302   // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
 303   // with AOT.
 304   Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
 305 
 306 #ifdef AARCH64
       // Record the full encoding range on AArch64. NOTE(review): presumably the
       // AArch64 backend uses this range when emitting narrow-klass decode code,
       // matching the runtime path's set_narrow_klass_range(CompressedClassSpaceSize)
       // -- confirm against the AArch64 port sources.
 307   Universe::set_narrow_klass_range(cds_total);
 308 #endif
 309 
 310   Metaspace::initialize_class_space(tmp_class_space);
 311   tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
 312                 p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
 313 
 314   tty->print_cr("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
 315                 CompressedClassSpaceSize, p2i(tmp_class_space.base()));
 316 #endif
 317 
 318   // Start with 0 committed bytes. The memory will be committed as needed by
 319   // MetaspaceShared::commit_shared_space_to().
 320   if (!_shared_vs.initialize(_shared_rs, 0)) {
 321     vm_exit_during_initialization("Unable to allocate memory for shared space");
 322   }
 323 
 324   _mc_region.init(&_shared_rs);
 325   tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
 326                 _shared_rs.size(), p2i(_shared_rs.base()));
 327 }
 328 


< prev index next >