src/share/vm/memory/metaspace.cpp




--- old/src/share/vm/memory/metaspace.cpp

  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == NULL || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}

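The DumpSharedSpaces branch above tries to reserve the shared region at a preferred base (SharedBaseAddress) and falls back to letting the OS pick an address when that fails. A minimal POSIX sketch of the same round-up-then-try-preferred-then-anywhere shape, assuming an anonymous PROT_NONE reservation; reserve_sketch and align_up_sketch are illustrative stand-ins, not HotSpot's ReservedSpace or align_size_up:

  #include <stddef.h>
  #include <sys/mman.h>

  // Round 'size' up to a multiple of 'granularity' (assumed a power of
  // two), as align_size_up() does above.
  static size_t align_up_sketch(size_t size, size_t granularity) {
    return (size + granularity - 1) & ~(granularity - 1);
  }

  // Try the preferred base first; if the OS places the mapping elsewhere,
  // release it and reserve anywhere instead -- the same fallback shape as
  // the DumpSharedSpaces branch.
  static void* reserve_sketch(void* preferred, size_t bytes,
                              size_t granularity) {
    bytes = align_up_sketch(bytes, granularity);
    const int prot  = PROT_NONE;                 // reserve, don't commit
    const int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    if (preferred != NULL) {
      void* p = mmap(preferred, bytes, prot, flags, -1, 0);
      if (p == preferred) {
        return p;                                // got the requested base
      }
      if (p != MAP_FAILED) {
        munmap(p, bytes);                        // elsewhere: give it back
      }
    }
    void* anywhere = mmap(NULL, bytes, prot, flags, -1, 0);
    return (anywhere == MAP_FAILED) ? NULL : anywhere;
  }
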
void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_free(), "Should be marked free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL && chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();




+++ new/src/share/vm/memory/metaspace.cpp

  // align up to vm allocation granularity
  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    char* shared_base = (char*)SharedBaseAddress;
    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == NULL || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(byte_size);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    _rs = ReservedSpace(byte_size);
  }

  NMTTrackOp op(NMTTrackOp::TypeOp);
  op.execute_op((address)_rs.base(), 0, mtClass);
}
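The only functional change in this hunk is the last statement: the direct MemTracker::record_virtual_memory_type() call becomes an NMTTrackOp that is constructed with an operation type and then executed against the reserved base. A hedged sketch of that command-object shape, assuming only the two-step construct/execute usage visible in the diff; TrackOpSketch and everything in it beyond the (address, size, tag) triple are invented for illustration:

  #include <stddef.h>

  typedef unsigned char* address;
  enum MemoryTagSketch { mtNoneSketch, mtClassSketch };

  // Command-object sketch: describe the tracking request first,
  // execute it later.
  class TrackOpSketch {
   public:
    enum OpType { TypeOp };        // "tag this region with a memory type"
    explicit TrackOpSketch(OpType type) : _type(type) {}
    void execute_op(address addr, size_t size, MemoryTagSketch tag) {
      // ... dispatch on _type and hand (addr, size, tag) to the tracker ...
      (void)addr; (void)size; (void)tag;
    }
   private:
    OpType _type;
  };

Used in the same shape the new code shows: construct with the op type, then call execute_op(base, 0, tag) on the reserved region.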

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_free(), "Should be marked free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL && chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}
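purge() relies on the chunks of a VirtualSpaceNode being laid out contiguously between first_chunk() and top(), each chunk recording its own size in MetaWords, so the walk can advance by word_size() to reach the next chunk. A minimal sketch of that pointer-bump iteration over variably sized records; the types are simplified stand-ins for Metachunk and MetaWord, not HotSpot's:

  #include <stddef.h>

  typedef size_t WordSketch;              // stand-in for MetaWord

  struct ChunkSketch {
    size_t word_size;                     // size of this chunk, in words
  };

  // Walk contiguous, variably sized chunks from 'first' up to 'top',
  // advancing by each chunk's own word count -- the same pointer-bump
  // iteration purge() performs over [first_chunk(), top()).
  static void walk_chunks(WordSketch* first, WordSketch* top) {
    WordSketch* p = first;
    while (p < top) {
      ChunkSketch* c = (ChunkSketch*)p;
      // ... process c here; purge() removes it from the ChunkManager ...
      p += c->word_size;                  // next chunk starts here
    }
  }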

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
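The listing cuts off inside container_count_slow(), but the name and the #ifdef ASSERT guard suggest the common debug-only pattern of an O(n) recount used to cross-check an incrementally maintained counter. A hedged sketch of that pattern, with all names invented for illustration:

  #include <assert.h>
  #include <stddef.h>

  struct ContainerSketch { ContainerSketch* next; };

  struct NodeSketch {
    ContainerSketch* head;
    unsigned         cached_count;   // updated incrementally on add/remove

    // O(n) recount, only meant for verification in debug builds.
    unsigned count_slow() const {
      unsigned n = 0;
      for (ContainerSketch* c = head; c != NULL; c = c->next) {
        n++;
      }
      return n;
    }

    void verify_count() const {
  #ifdef ASSERT
      assert(cached_count == count_slow());
  #endif
    }
  };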

