src/share/vm/memory/compactingPermGenGen.cpp

 223     // Allocate the shared spaces.
 224     _ro_bts = new BlockOffsetSharedArray(
 225                   MemRegion(readonly_bottom,
 226                             heap_word_size(spec()->read_only_size())),
 227                   heap_word_size(spec()->read_only_size()));
 228     _ro_space = new OffsetTableContigSpace(_ro_bts,
 229                   MemRegion(readonly_bottom, readonly_end));
 230     _rw_bts = new BlockOffsetSharedArray(
 231                   MemRegion(readwrite_bottom,
 232                             heap_word_size(spec()->read_write_size())),
 233                   heap_word_size(spec()->read_write_size()));
 234     _rw_space = new OffsetTableContigSpace(_rw_bts,
 235                   MemRegion(readwrite_bottom, readwrite_end));
 236 
 237     // Restore mangling flag.
 238     NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)
 239 
 240     if (_ro_space == NULL || _rw_space == NULL)
 241       vm_exit_during_initialization("Could not allocate a shared space");
 242 
 243     // Cover both shared spaces entirely with cards.
 244     _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));
 245 
 246     if (UseSharedSpaces) {
 247 
 248       // Map in the regions in the shared file.
 249       FileMapInfo* mapinfo = FileMapInfo::current_info();
 250       size_t image_alignment = mapinfo->alignment();
 251       CollectedHeap* ch = Universe::heap();
 252       if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
 253           (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
 254           (!mapinfo->map_space(md, md_rs, NULL))      ||
 255           (!mapinfo->map_space(mc, mc_rs, NULL))      ||
 256           // check the alignment constraints
 257           (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
 258            image_alignment !=
 259            ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
 260         // Base addresses didn't match; skip sharing, but continue
 261         shared_rs.release();
 262         spec()->disable_sharing();
 263         // If -Xshare:on is specified, print out the error message and exit VM,
 264         // otherwise, set UseSharedSpaces to false and continue.
 265         if (RequireSharedSpaces) {
 266           vm_exit_during_initialization("Unable to use shared archive.", NULL);
 267         } else {
 268           FLAG_SET_DEFAULT(UseSharedSpaces, false);
 269         }
 270 
 271         // Note: freeing the block offset array objects does not
 272         // currently free up the underlying storage.
 273         delete _ro_bts;
 274         _ro_bts = NULL;
 275         delete _ro_space;
 276         _ro_space = NULL;
 277         delete _rw_bts;
 278         _rw_bts = NULL;
 279         delete _rw_space;
 280         _rw_space = NULL;
 281         shared_end = (HeapWord*)(rs.base() + rs.size());
 282         _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
 283       }
 284     }
 285 
 286     // Reserved region includes shared spaces for oop.is_in_reserved().
 287     _reserved.set_end(shared_end);
 288 
 289   } else {
 290     _ro_space = NULL;
 291     _rw_space = NULL;
 292   }
 293 }
 294 
 295 
 296 // Do a complete scan of the shared read write space to catch all
 297 // objects which contain references to any younger generation.  Forward
 298 // the pointers.  Avoid space_iterate, as actually visiting all the
 299 // objects in the space will page in more objects than we need.
 300 // Instead, use the system dictionary as strong roots into the read
 301 // write space.
 302 //
 303 // If a RedefineClasses() call has been made, then we have to iterate
 304 // over the entire shared read-write space in order to find all the
 305 // objects that need to be forwarded. For example, it is possible for




 223     // Allocate the shared spaces.
 224     _ro_bts = new BlockOffsetSharedArray(
 225                   MemRegion(readonly_bottom,
 226                             heap_word_size(spec()->read_only_size())),
 227                   heap_word_size(spec()->read_only_size()));
 228     _ro_space = new OffsetTableContigSpace(_ro_bts,
 229                   MemRegion(readonly_bottom, readonly_end));
 230     _rw_bts = new BlockOffsetSharedArray(
 231                   MemRegion(readwrite_bottom,
 232                             heap_word_size(spec()->read_write_size())),
 233                   heap_word_size(spec()->read_write_size()));
 234     _rw_space = new OffsetTableContigSpace(_rw_bts,
 235                   MemRegion(readwrite_bottom, readwrite_end));
 236 
 237     // Restore mangling flag.
 238     NOT_PRODUCT(ZapUnusedHeapArea = old_ZapUnusedHeapArea;)
 239 
 240     if (_ro_space == NULL || _rw_space == NULL)
 241       vm_exit_during_initialization("Could not allocate a shared space");
 242 
 243     if (UseSharedSpaces) {
 244 
 245       // Map in the regions in the shared file.
 246       FileMapInfo* mapinfo = FileMapInfo::current_info();
 247       size_t image_alignment = mapinfo->alignment();
 248       CollectedHeap* ch = Universe::heap();
 249       if ((!mapinfo->map_space(ro, ro_rs, _ro_space)) ||
 250           (!mapinfo->map_space(rw, rw_rs, _rw_space)) ||
 251           (!mapinfo->map_space(md, md_rs, NULL))      ||
 252           (!mapinfo->map_space(mc, mc_rs, NULL))      ||
 253           // check the alignment constraints
 254           (ch == NULL || ch->kind() != CollectedHeap::GenCollectedHeap ||
 255            image_alignment !=
 256            ((GenCollectedHeap*)ch)->gen_policy()->max_alignment())) {
 257         // Base addresses didn't match; skip sharing, but continue
 258         shared_rs.release();
 259         spec()->disable_sharing();
 260         // If -Xshare:on is specified, print out the error message and exit VM,
 261         // otherwise, set UseSharedSpaces to false and continue.
 262         if (RequireSharedSpaces) {
 263           vm_exit_during_initialization("Unable to use shared archive.", NULL);
 264         } else {
 265           FLAG_SET_DEFAULT(UseSharedSpaces, false);
 266         }
 267 
 268         // Note: freeing the block offset array objects does not
 269         // currently free up the underlying storage.
 270         delete _ro_bts;
 271         _ro_bts = NULL;
 272         delete _ro_space;
 273         _ro_space = NULL;
 274         delete _rw_bts;
 275         _rw_bts = NULL;
 276         delete _rw_space;
 277         _rw_space = NULL;
 278         shared_end = (HeapWord*)(rs.base() + rs.size());
 279       }
 280     }
 281 
 282     if (spec()->enable_shared_spaces()) {
 283       // Cover both shared spaces entirely with cards.
 284       _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));
 285     }
 286 
 287     // Reserved region includes shared spaces for oop.is_in_reserved().
 288     _reserved.set_end(shared_end);
 289 
 290   } else {
 291     _ro_space = NULL;
 292     _rw_space = NULL;
 293   }
 294 }
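
For context on the resize_covered_region call guarded by spec()->enable_shared_spaces() above: covering the shared spaces "entirely with cards" means the card table must hold one entry per card-sized chunk of the region [readonly_bottom, readwrite_end). A self-contained sketch of that arithmetic, assuming HotSpot's usual 512-byte card size; the sizes and names below are illustrative, not taken from this file:

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed card size: 512 bytes, HotSpot's usual card granularity
  // (an assumption here, not read from this file).
  const std::size_t card_size = 512;

  // Hypothetical space sizes standing in for spec()->read_only_size()
  // and spec()->read_write_size().
  const std::size_t read_only_size  = 10 * 1024 * 1024;
  const std::size_t read_write_size = 12 * 1024 * 1024;

  // The covered region spans [readonly_bottom, readwrite_end),
  // i.e. both shared spaces back to back.
  const std::size_t region_bytes = read_only_size + read_write_size;

  // One card table entry per card, rounding up to cover the tail.
  const std::size_t cards = (region_bytes + card_size - 1) / card_size;

  printf("%zu bytes of shared space -> %zu card table entries\n",
         region_bytes, cards);
  return 0;
}

In the second listing this coverage is only established while spec()->enable_shared_spaces() still holds, so a failed mapping (which calls disable_sharing()) leaves the card table untouched instead of being resized and then shrunk back as in the first listing.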
 295 
 296 
 297 // Do a complete scan of the shared read write space to catch all
 298 // objects which contain references to any younger generation.  Forward
 299 // the pointers.  Avoid space_iterate, as actually visiting all the
 300 // objects in the space will page in more objects than we need.
 301 // Instead, use the system dictionary as strong roots into the read
 302 // write space.
 303 //
 304 // If a RedefineClasses() call has been made, then we have to iterate
 305 // over the entire shared read-write space in order to find all the
 306 // objects that need to be forwarded. For example, it is possible for
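
The block comment above argues for walking the system dictionary as strong roots instead of calling space_iterate: a full iteration pages in every object in the shared read-write space, while only the objects reachable from the dictionary actually need their pointers forwarded. A self-contained toy model of that trade-off, with purely illustrative names (this is not HotSpot code):

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy "object": outgoing references plus a visited mark.
struct Obj {
  std::vector<Obj*> refs;
  bool visited = false;
};

// Analogue of space_iterate: touches every object in the space.
static std::size_t scan_whole_space(std::vector<Obj>& space) {
  std::size_t touched = 0;
  for (Obj& o : space) { o.visited = true; ++touched; }
  return touched;
}

// Analogue of using the system dictionary as strong roots:
// only objects reachable from the roots are ever touched.
static std::size_t scan_from_roots(const std::vector<Obj*>& roots) {
  std::size_t touched = 0;
  std::vector<Obj*> stack(roots.begin(), roots.end());
  while (!stack.empty()) {
    Obj* o = stack.back();
    stack.pop_back();
    if (o->visited) continue;
    o->visited = true;
    ++touched;
    for (Obj* r : o->refs) stack.push_back(r);
  }
  return touched;
}

int main() {
  std::vector<Obj> space(1000);        // stand-in for the read-write space
  space[0].refs.push_back(&space[1]);  // a small reachable cluster
  std::vector<Obj*> roots = { &space[0] };

  printf("roots-only scan touched %zu objects\n", scan_from_roots(roots));
  for (Obj& o : space) o.visited = false;
  printf("full-space scan touched %zu objects\n", scan_whole_space(space));
  return 0;
}

The RedefineClasses caveat in the comment carries over to the toy as well: once arbitrary objects may have been modified without the roots reflecting it, only the full scan is guaranteed to find every object that needs forwarding.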