src/share/vm/memory/metaspace.cpp
hsx-rt.8007074 (sdiff)

Old:

 328   // Expands/shrinks the committed space in a virtual space.  Delegates
 329   // to VirtualSpace
 330   bool expand_by(size_t words, bool pre_touch = false);
 331   bool shrink_by(size_t words);
 332 
 333   // In preparation for deleting this node, remove all the chunks
 334   // in the node from any freelist.
 335   void purge(ChunkManager* chunk_manager);
 336 
 337 #ifdef ASSERT
 338   // Debug support
 339   static void verify_virtual_space_total();
 340   static void verify_virtual_space_count();
 341   void mangle();
 342 #endif
 343 
 344   void print_on(outputStream* st) const;
 345 };
 346 
 347   // byte_size is the size of the associated virtualspace.
 348 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
 349   // align up to vm allocation granularity
 350   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 351 
 352   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve at a
 353   // configurable address, generally at the top of the Java heap so other
 354   // memory addresses don't conflict.
 355   if (DumpSharedSpaces) {
 356     char* shared_base = (char*)SharedBaseAddress;
 357     _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
 358     if (_rs.is_reserved()) {
 359       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 360     } else {
 361     // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 362       _rs = ReservedSpace(byte_size);
 363     }
 364     MetaspaceShared::set_shared_rs(&_rs);
 365   } else {
 366     _rs = ReservedSpace(byte_size);
 367   }
 368 

New:

 328   // Expands/shrinks the committed space in a virtual space.  Delegates
 329   // to VirtualSpace
 330   bool expand_by(size_t words, bool pre_touch = false);
 331   bool shrink_by(size_t words);
 332 
 333   // In preparation for deleting this node, remove all the chunks
 334   // in the node from any freelist.
 335   void purge(ChunkManager* chunk_manager);
 336 
 337 #ifdef ASSERT
 338   // Debug support
 339   static void verify_virtual_space_total();
 340   static void verify_virtual_space_count();
 341   void mangle();
 342 #endif
 343 
 344   void print_on(outputStream* st) const;
 345 };
 346 
 347   // byte_size is the size of the associated virtualspace.
 348 VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
 349   // align up to vm allocation granularity
 350   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 351 
 352   // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve at a
 353   // configurable address, generally at the top of the Java heap so other
 354   // memory addresses don't conflict.
 355   if (DumpSharedSpaces) {
 356     char* shared_base = (char*)SharedBaseAddress;
 357     _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
 358     if (_rs.is_reserved()) {
 359       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
 360     } else {
 361     // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
 362       _rs = ReservedSpace(byte_size);
 363     }
 364     MetaspaceShared::set_shared_rs(&_rs);
 365   } else {
 366     _rs = ReservedSpace(byte_size);
 367   }
 368 
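
For context, the only line that differs between the two versions is line 348 of the listing:
the VirtualSpaceNode constructor's member initializer goes from _rs(0) to _rs(), i.e. the
ReservedSpace member is now default-constructed instead of being passed a size of 0. As the
constructor body shows, _rs is reassigned via ReservedSpace(...) on every path anyway, which
suggests the initializer only needs to establish an empty placeholder. The sketch below is a
minimal, self-contained illustration of that pattern, not HotSpot code; Reservation and Node
are hypothetical stand-ins for ReservedSpace and VirtualSpaceNode.

// Minimal sketch (hypothetical names, not HotSpot code): default-construct a
// resource-describing member, then assign the real reservation in the body.
#include <cstddef>
#include <cstdio>

class Reservation {
  size_t _size;
  bool   _reserved;
 public:
  // Default constructor: an empty placeholder; nothing is reserved.
  Reservation() : _size(0), _reserved(false) {}
  // Sized constructor: stands in for the path that actually reserves address
  // space; invoking it with a size of 0 runs that path to no purpose.
  explicit Reservation(size_t size) : _size(size), _reserved(size > 0) {}
  bool   is_reserved() const { return _reserved; }
  size_t size()        const { return _size; }
};

class Node {
  Reservation _rs;
 public:
  // Old style:  Node(size_t s) : _rs(0) { ... }   (sized constructor with 0)
  // New style:  default-construct the member, then assign the real
  // reservation once the final size (and placement) is known.
  explicit Node(size_t byte_size) : _rs() {
    _rs = Reservation(byte_size);
  }
  const Reservation& rs() const { return _rs; }
};

int main() {
  Node n(4096);
  std::printf("reserved: %s, size: %zu bytes\n",
              n.rs().is_reserved() ? "yes" : "no", n.rs().size());
  return 0;
}

Either form ends up with the same _rs once the constructor body runs; the new initializer just
avoids routing a meaningless size of 0 through the sized constructor before the real
reservation is made.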