src/share/vm/memory/allocation.cpp
Sdiff, changeset hs25_8011661

Old version:

 242     }
 243     return c;
 244   }
 245 
 246  public:
 247   // All chunks in a ChunkPool have the same size
 248    ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 249 
 250   // Allocate a new chunk from the pool (might expand the pool)
 251   _NOINLINE_ void* allocate(size_t bytes) {
 252     assert(bytes == _size, "bad size");
 253     void* p = NULL;
 254     // Because of NMT, os::malloc may need a VM lock, and no VM lock can
 255     // be taken inside ThreadCritical, so os::malloc must be done outside the ThreadCritical section
 256     { ThreadCritical tc;
 257       _num_used++;
 258       p = get_first();
 259     }
 260     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
 261     if (p == NULL)
 262       vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
 263 
 264     return p;
 265   }
 266 
 267   // Return a chunk to the pool
 268   void free(Chunk* chunk) {
 269     assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
 270     ThreadCritical tc;
 271     _num_used--;
 272 
 273     // Add chunk to list
 274     chunk->set_next(_first);
 275     _first = chunk;
 276     _num_chunks++;
 277   }
 278 
 279   // Prune the pool
 280   void free_all_but(size_t n) {
 281     Chunk* cur = NULL;
 282     Chunk* next;


 354    }
 355 };
 356 
 357 //--------------------------------------------------------------------------------------
 358 // Chunk implementation
 359 
 360 void* Chunk::operator new(size_t requested_size, size_t length) {
 361   // requested_size is equal to sizeof(Chunk), but for the arena
 362   // allocations to come out aligned as expected, the chunk header size
 363   // must be rounded up to the expected arena alignment; ARENA_ALIGN
 364   // handles the case where sizeof(Chunk) is not already a multiple of it.
 365   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
 366   size_t bytes = ARENA_ALIGN(requested_size) + length;
 367   switch (length) {
 368    case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
 369    case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
 370    case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
 371    default: {
 372      void *p =  os::malloc(bytes, mtChunk, CALLER_PC);
 373      if (p == NULL)
 374        vm_exit_out_of_memory(bytes, "Chunk::new");
 375      return p;
 376    }
 377   }
 378 }
 379 
 380 void Chunk::operator delete(void* p) {
 381   Chunk* c = (Chunk*)p;
 382   switch (c->length()) {
 383    case Chunk::size:        ChunkPool::large_pool()->free(c); break;
 384    case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
 385    case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
 386    default:                 os::free(c, mtChunk);
 387   }
 388 }
 389 
 390 Chunk::Chunk(size_t length) : _len(length) {
 391   _next = NULL;         // Chain on the linked list
 392 }
 393 
 394 


 514 // change the size
 515 void Arena::set_size_in_bytes(size_t size) {
 516   if (_size_in_bytes != size) {
 517     _size_in_bytes = size;
 518     MemTracker::record_arena_size((address)this, size);
 519   }
 520 }
 521 
 522 // Total of all Chunks in arena
 523 size_t Arena::used() const {
 524   size_t sum = _chunk->length() - (_max-_hwm); // Bytes used in the current Chunk (length minus leftover)
 525   register Chunk *k = _first;
 526   while( k != _chunk) {         // Walk every earlier Chunk in the list
 527     sum += k->length();         // Total size of this Chunk
 528     k = k->next();              // Bump along to next Chunk
 529   }
 530   return sum;                   // Return total consumed space.
 531 }
 532 
 533 void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
 534   vm_exit_out_of_memory(sz, whence);
 535 }
 536 
 537 // Grow the arena by allocating a new Chunk
 538 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
 539   // Get the minimum required size: at least a full Chunk, or larger for giant objects
 540   size_t len = MAX2(x, (size_t) Chunk::size);
 541 
 542   Chunk *k = _chunk;            // Get filled-up chunk address
 543   _chunk = new (len) Chunk(len);
 544 
 545   if (_chunk == NULL) {
 546     if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 547       signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
 548     }
 549     return NULL;
 550   }
 551   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
 552   else _first = _chunk;
 553   _hwm  = _chunk->bottom();     // Save the cached hwm, max
 554   _max =  _chunk->top();


New version:

 242     }
 243     return c;
 244   }
 245 
 246  public:
 247   // All chunks in a ChunkPool have the same size
 248    ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 249 
 250   // Allocate a new chunk from the pool (might expand the pool)
 251   _NOINLINE_ void* allocate(size_t bytes) {
 252     assert(bytes == _size, "bad size");
 253     void* p = NULL;
 254     // Because of NMT, os::malloc may need a VM lock, and no VM lock can
 255     // be taken inside ThreadCritical, so os::malloc must be done outside the ThreadCritical section
 256     { ThreadCritical tc;
 257       _num_used++;
 258       p = get_first();
 259     }
 260     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
 261     if (p == NULL)
 262       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
 263 
 264     return p;
 265   }
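The comment at lines 254-255 encodes an ordering constraint: because of NMT, os::malloc may need a VM lock, and no VM lock may be taken while holding ThreadCritical, so only the freelist pop happens under the lock and the malloc fallback runs after it is released. Below is a minimal stand-alone sketch of that pattern, assuming std::mutex as a stand-in for ThreadCritical; the names are hypothetical, and it omits the used/available counters and the OOM exit path of the real pool.

    #include <cstddef>
    #include <cstdlib>
    #include <mutex>

    // Hypothetical sketch, not HotSpot code: pop from the freelist while
    // holding the lock, but call the system allocator only after the lock
    // has been released (the ordering the NMT comment above requires).
    class FixedSizePool {
      struct Node { Node* next; };
      std::mutex  _lock;                          // stand-in for ThreadCritical
      Node*       _first = nullptr;
      std::size_t _size;
     public:
      explicit FixedSizePool(std::size_t size) : _size(size) {}

      void* allocate() {
        void* p = nullptr;
        {
          std::lock_guard<std::mutex> g(_lock);   // critical section: list pop only
          if (_first != nullptr) { p = _first; _first = _first->next; }
        }
        if (p == nullptr) p = std::malloc(_size); // slow path, outside the lock
        return p;
      }

      void free(void* q) {
        std::lock_guard<std::mutex> g(_lock);     // push back onto the freelist
        Node* n = static_cast<Node*>(q);
        n->next = _first;
        _first  = n;
      }
    };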
 266 
 267   // Return a chunk to the pool
 268   void free(Chunk* chunk) {
 269     assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
 270     ThreadCritical tc;
 271     _num_used--;
 272 
 273     // Add chunk to list
 274     chunk->set_next(_first);
 275     _first = chunk;
 276     _num_chunks++;
 277   }
 278 
 279   // Prune the pool
 280   void free_all_but(size_t n) {
 281     Chunk* cur = NULL;
 282     Chunk* next;


 354    }
 355 };
 356 
 357 //--------------------------------------------------------------------------------------
 358 // Chunk implementation
 359 
 360 void* Chunk::operator new(size_t requested_size, size_t length) {
 361   // requested_size is equal to sizeof(Chunk), but for the arena
 362   // allocations to come out aligned as expected, the chunk header size
 363   // must be rounded up to the expected arena alignment; ARENA_ALIGN
 364   // handles the case where sizeof(Chunk) is not already a multiple of it.
 365   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
 366   size_t bytes = ARENA_ALIGN(requested_size) + length;
 367   switch (length) {
 368    case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
 369    case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
 370    case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
 371    default: {
 372      void *p =  os::malloc(bytes, mtChunk, CALLER_PC);
 373      if (p == NULL)
 374        vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
 375      return p;
 376    }
 377   }
 378 }
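The length placement argument both routes the request (the three standard payload sizes come from the pools, anything else from os::malloc) and sizes the allocation, so the chunk header and the arena payload share one contiguous block of ARENA_ALIGN(requested_size) + length bytes. A self-contained sketch of that general technique, using a hypothetical Block type rather than the real Chunk:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Hypothetical illustration: a class-specific placement operator new whose
    // extra argument is the payload length, so header and payload come from a
    // single allocation.
    struct Block {
      std::size_t _len;
      explicit Block(std::size_t len) : _len(len) {}

      void* operator new(std::size_t header_size, std::size_t payload_len) {
        void* p = std::malloc(header_size + payload_len);  // header + payload
        if (p == nullptr) throw std::bad_alloc();
        return p;
      }
      void operator delete(void* p) { std::free(p); }
      // The matching placement delete (void*, std::size_t) would only be used
      // if the constructor threw; omitted here for brevity.

      char* payload() { return reinterpret_cast<char*>(this + 1); }
    };

    int main() {
      Block* b = new (1024) Block(1024);  // same call shape as: new (len) Chunk(len)
      b->payload()[0] = 'x';
      delete b;
      return 0;
    }

The same call shape appears in Arena::grow below at line 543: _chunk = new (len) Chunk(len).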
 379 
 380 void Chunk::operator delete(void* p) {
 381   Chunk* c = (Chunk*)p;
 382   switch (c->length()) {
 383    case Chunk::size:        ChunkPool::large_pool()->free(c); break;
 384    case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
 385    case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
 386    default:                 os::free(c, mtChunk);
 387   }
 388 }
 389 
 390 Chunk::Chunk(size_t length) : _len(length) {
 391   _next = NULL;         // Chain on the linked list
 392 }
 393 
 394 


 514 // change the size
 515 void Arena::set_size_in_bytes(size_t size) {
 516   if (_size_in_bytes != size) {
 517     _size_in_bytes = size;
 518     MemTracker::record_arena_size((address)this, size);
 519   }
 520 }
 521 
 522 // Total of all Chunks in arena
 523 size_t Arena::used() const {
 524   size_t sum = _chunk->length() - (_max-_hwm); // Bytes used in the current Chunk (length minus leftover)
 525   register Chunk *k = _first;
 526   while( k != _chunk) {         // Walk every earlier Chunk in the list
 527     sum += k->length();         // Total size of this Chunk
 528     k = k->next();              // Bump along to next Chunk
 529   }
 530   return sum;                   // Return total consumed space.
 531 }
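A quick worked example of the arithmetic above (hypothetical numbers): with two earlier chunks of 1024 bytes each and a current 1024-byte chunk in which _max - _hwm leaves 400 bytes free, used() returns 1024 + 1024 + (1024 - 400) = 2672 bytes.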
 532 
 533 void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
 534   vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
 535 }
 536 
 537 // Grow the arena by allocating a new Chunk
 538 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
 539   // Get the minimum required size: at least a full Chunk, or larger for giant objects
 540   size_t len = MAX2(x, (size_t) Chunk::size);
 541 
 542   Chunk *k = _chunk;            // Get filled-up chunk address
 543   _chunk = new (len) Chunk(len);
 544 
 545   if (_chunk == NULL) {
 546     if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
 547       signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
 548     }
 549     return NULL;
 550   }
 551   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
 552   else _first = _chunk;
 553   _hwm  = _chunk->bottom();     // Save the cached hwm, max
 554   _max =  _chunk->top();
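grow() is only partially shown here, but the failure handling at lines 545-549 illustrates the AllocFailType contract: EXIT_OOM aborts the VM through signal_out_of_memory(), while any other mode hands NULL back to the caller. A stand-alone sketch of that contract with hypothetical names (not the HotSpot API):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical sketch of the two failure strategies: EXIT_OOM terminates
    // the process (as vm_exit_out_of_memory does), RETURN_NULL lets the caller
    // decide how to recover.
    enum class AllocFail { EXIT_OOM, RETURN_NULL };

    static void* grow_or_fail(std::size_t bytes, AllocFail mode) {
      void* p = std::malloc(bytes);
      if (p == nullptr) {
        if (mode == AllocFail::EXIT_OOM) {
          std::fprintf(stderr, "arena grow failed (%zu bytes)\n", bytes);
          std::exit(1);
        }
        return nullptr;                  // the RETURN_NULL path
      }
      return p;
    }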

