src/share/vm/memory/allocation.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/memory

src/share/vm/memory/allocation.cpp

Print this page




// _ValueObj instances are embedded in other objects or live on the stack;
// heap array new/delete on them is a programming error, caught here.
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }
  64 
// Allocate a MetaspaceObj out of the given class loader's metaspace.
// word_size (not the C++ 'size' argument) determines the allocation size;
// read_only selects the read-only region.  Returns NULL via CHECK_NULL if
// the allocation raises a pending exception.
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             type, CHECK_NULL);
}
  72 
// True if this object lives in the shared (CDS) metaspace region.
bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

// True if this address lies inside some metaspace tracked by the
// class loader data graph.
bool MetaspaceObj::is_metaspace_object() const {
  return ClassLoaderDataGraph::contains((void*)this);
}
  80 
  81 void MetaspaceObj::print_address_on(outputStream* st) const {
  82   st->print(" {"INTPTR_FORMAT"}", this);
  83 }
  84 
// Allocate a ResourceObj either on the C heap or in the current thread's
// resource area, per 'type'.  Debug builds record the allocation type in the
// object header words so operator delete / the destructor can verify usage.
void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}
 101 
 102 void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {


// Array form of the nothrow placement allocator; delegates to the scalar
// nothrow operator new with the same type/flags.
void* ResourceObj::operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}
 129 
// Only objects allocated with new(C_HEAP) may be deleted individually;
// resource-area objects are reclaimed wholesale when their ResourceMark
// unwinds, and stack/embedded objects are never deleted.
void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  // Zap the encoded type word so a stale use / double delete trips the
  // header-consistency asserts in debug builds.
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}
 140 
 141 #ifdef ASSERT
 142 void ResourceObj::set_allocation_type(address res, allocation_type type) {
 143     // Set allocation type in the resource object
 144     uintptr_t allocation = (uintptr_t)res;
 145     assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " PTR_FORMAT, res));
 146     assert(type <= allocation_mask, "incorrect allocation type");
 147     ResourceObj* resobj = (ResourceObj *)res;
 148     resobj->_allocation_t[0] = ~(allocation + type);
 149     if (type != STACK_OR_EMBEDDED) {
 150       // Called from operator new() and CollectionSetChooser(),
 151       // set verification value.
 152       resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
 153     }
 154 }
 155 
// Decode the allocation type from _allocation_t[0]; asserts that the encoded
// address still matches 'this' (i.e. the header was not lost or copied).
ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

// True when both header words agree: operator new() stored a matching type
// in _allocation_t[0] and the verification value in _allocation_t[1]
// (see set_allocation_type()).
bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type()  == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
 166 
 167 ResourceObj::ResourceObj() { // default constructor
 168     if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
 169       // Operator new() is not called for allocations
 170       // on stack and for embedded objects.
 171       set_allocation_type((address)this, STACK_OR_EMBEDDED);
 172     } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
 173       // For some reason we got a value which resembles
 174       // an embedded or stack object (operator new() does not
 175       // set such type). Keep it since it is valid value
 176       // (even if it was garbage).
 177       // Ignore garbage in other fields.
 178     } else if (is_type_set()) {
 179       // Operator new() was called and type was set.
 180       assert(!allocated_on_stack(),
 181              err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
 182                      this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
 183     } else {
 184       // Operator new() was not called.
 185       // Assume that it is embedded or stack object.
 186       set_allocation_type((address)this, STACK_OR_EMBEDDED);
 187     }
 188     _allocation_t[1] = 0; // Zap verification value
 189 }
 190 
 191 ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
 192     // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
 193     // Note: garbage may resembles valid value.
 194     assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
 195            err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
 196                    this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
 197     set_allocation_type((address)this, STACK_OR_EMBEDDED);
 198     _allocation_t[1] = 0; // Zap verification value
 199 }
 200 
 201 ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
 202     // Used in InlineTree::ok_to_inline() for WarmCallInfo.
 203     assert(allocated_on_stack(),
 204            err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
 205                    this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
 206     // Keep current _allocation_t value;
 207     return *this;
 208 }
 209 
// Destructor: zap the type word for non-C_HEAP objects so stale references
// trip the header asserts; operator delete() zaps for C_HEAP objects.
ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
 216 #endif // ASSERT
 217 
 218 
 219 void trace_heap_malloc(size_t size, const char* name, void* p) {
 220   // A lock is not needed here - tty uses a lock internally
 221   tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
 222 }
 223 
 224 
 225 void trace_heap_free(void* p) {
 226   // A lock is not needed here - tty uses a lock internally
 227   tty->print_cr("Heap free   " INTPTR_FORMAT, p);
 228 }
 229 
 230 //--------------------------------------------------------------------------------------
 231 // ChunkPool implementation
 232 
 233 // MT-safe pool of chunks to reduce malloc/free thrashing
 234 // NB: not using Mutex because pools are used before Threads are initialized
 235 class ChunkPool: public CHeapObj<mtInternal> {
 236   Chunk*       _first;        // first cached Chunk; its first word points to next chunk
 237   size_t       _num_chunks;   // number of unused chunks in pool
 238   size_t       _num_used;     // number of chunks currently checked out
 239   const size_t _size;         // size of each chunk (must be uniform)
 240 
 241   // Our four static pools
 242   static ChunkPool* _large_pool;
 243   static ChunkPool* _medium_pool;
 244   static ChunkPool* _small_pool;
 245   static ChunkPool* _tiny_pool;
 246 
 247   // return first element or null


 708 }
 709 
// When ALLOW_OPERATOR_NEW_USAGE is off, any use of the global operators
// inside the VM is a bug; these replacements assert in debug builds.
// NOTE(review): the standard nothrow array form takes
// 'const std::nothrow_t&'; with a non-const reference this overload is not
// selected by 'new (std::nothrow) T[n]' -- confirm against the matching
// declaration in allocation.hpp before changing the signature.
void* operator new [](size_t size, std::nothrow_t&  nothrow_constant) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) {
  assert(false, "Should not call global delete");
}

void operator delete [](void* p) {
  assert(false, "Should not call global delete []");
}
 722 #endif // ALLOW_OPERATOR_NEW_USAGE
 723 
 724 void AllocatedObj::print() const       { print_on(tty); }
 725 void AllocatedObj::print_value() const { print_value_on(tty); }
 726 
 727 void AllocatedObj::print_on(outputStream* st) const {
 728   st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
 729 }
 730 
 731 void AllocatedObj::print_value_on(outputStream* st) const {
 732   st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
 733 }
 734 
// Cumulative bytes handed out by all arenas; updated through
// inc_bytes_allocated() below and read by AllocStats.
julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }
 738 
// Snapshot the global allocation counters at construction; the accessors
// below report deltas accumulated since that snapshot.
AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

// Deltas since this AllocStats was constructed.
julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
julong  AllocStats::num_frees()   { return os::num_frees - start_frees; }
julong  AllocStats::free_bytes()  { return os::free_bytes - start_mfree_bytes; }
julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
 752 void    AllocStats::print() {




// _ValueObj instances are embedded in other objects or live on the stack;
// heap array new/delete on them is a programming error, caught here.
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }
  64 
// Allocate a MetaspaceObj out of the given class loader's metaspace.
// word_size (not the C++ 'size' argument) determines the allocation size;
// read_only selects the read-only region.  Returns NULL via CHECK_NULL if
// the allocation raises a pending exception.
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             type, CHECK_NULL);
}
  72 
// True if this object lives in the shared (CDS) metaspace region.
bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

// True if this address lies inside some metaspace tracked by the
// class loader data graph.
bool MetaspaceObj::is_metaspace_object() const {
  return ClassLoaderDataGraph::contains((void*)this);
}
  80 
// Print this object's address in the form " {0x...}"; p2i() converts the
// pointer to intptr_t so it matches INTPTR_FORMAT in varargs.
void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}
  84 
  85 void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  86   address res;
  87   switch (type) {
  88    case C_HEAP:
  89     res = (address)AllocateHeap(size, flags, CALLER_PC);
  90     DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
  91     break;
  92    case RESOURCE_AREA:
  93     // new(size) sets allocation type RESOURCE_AREA.
  94     res = (address)operator new(size);
  95     break;
  96    default:
  97     ShouldNotReachHere();
  98   }
  99   return res;
 100 }
 101 
 102 void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {


// Array form of the nothrow placement allocator; delegates to the scalar
// nothrow operator new with the same type/flags.
void* ResourceObj::operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}
 129 
// Only objects allocated with new(C_HEAP) may be deleted individually;
// resource-area objects are reclaimed wholesale when their ResourceMark
// unwinds, and stack/embedded objects are never deleted.
void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  // Zap the encoded type word so a stale use / double delete trips the
  // header-consistency asserts in debug builds.
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}
 140 
 141 #ifdef ASSERT
// Tag a ResourceObj with its allocation type by encoding the type in the low
// (alignment) bits of the object address and storing the bit-inverted result
// in _allocation_t[0].  For heap/arena allocations, _allocation_t[1] gets an
// additional address-dependent verification value (see is_type_set()).
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " PTR_FORMAT, p2i(res)));
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}
 155 
// Decode the allocation type from _allocation_t[0]; asserts that the encoded
// address still matches 'this' (i.e. the header was not lost or copied).
ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

// True when both header words agree: operator new() stored a matching type
// in _allocation_t[0] and the verification value in _allocation_t[1]
// (see set_allocation_type()).
bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type()  == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
 166 
// Default constructor: runs after operator new (if any).  Distinguishes
// heap/arena allocations, whose header was already tagged by operator new,
// from stack/embedded instances whose header holds garbage, and tags the
// latter STACK_OR_EMBEDDED.
ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such type). Keep it since it is valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}
 190 
// Copy constructor: the copy must not inherit the source's heap/arena tag,
// since the new instance is by definition embedded or on the stack.
ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resembles valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}
 200 
// Copy assignment: only allowed into a stack/embedded object; keeps the
// target's own allocation tag rather than copying the source's.
ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep current _allocation_t value;
    return *this;
}
 209 
// Destructor: zap the type word for non-C_HEAP objects so stale references
// trip the header asserts; operator delete() zaps for C_HEAP objects.
ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
 216 #endif // ASSERT
 217 
 218 
 219 void trace_heap_malloc(size_t size, const char* name, void* p) {
 220   // A lock is not needed here - tty uses a lock internally
 221   tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
 222 }
 223 
 224 
 225 void trace_heap_free(void* p) {
 226   // A lock is not needed here - tty uses a lock internally
 227   tty->print_cr("Heap free   " INTPTR_FORMAT, p2i(p));
 228 }
 229 
 230 //--------------------------------------------------------------------------------------
 231 // ChunkPool implementation
 232 
 233 // MT-safe pool of chunks to reduce malloc/free thrashing
 234 // NB: not using Mutex because pools are used before Threads are initialized
 235 class ChunkPool: public CHeapObj<mtInternal> {
 236   Chunk*       _first;        // first cached Chunk; its first word points to next chunk
 237   size_t       _num_chunks;   // number of unused chunks in pool
 238   size_t       _num_used;     // number of chunks currently checked out
 239   const size_t _size;         // size of each chunk (must be uniform)
 240 
 241   // Our four static pools
 242   static ChunkPool* _large_pool;
 243   static ChunkPool* _medium_pool;
 244   static ChunkPool* _small_pool;
 245   static ChunkPool* _tiny_pool;
 246 
 247   // return first element or null


 708 }
 709 
// When ALLOW_OPERATOR_NEW_USAGE is off, any use of the global operators
// inside the VM is a bug; these replacements assert in debug builds.
// NOTE(review): the standard nothrow array form takes
// 'const std::nothrow_t&'; with a non-const reference this overload is not
// selected by 'new (std::nothrow) T[n]' -- confirm against the matching
// declaration in allocation.hpp before changing the signature.
void* operator new [](size_t size, std::nothrow_t&  nothrow_constant) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) {
  assert(false, "Should not call global delete");
}

void operator delete [](void* p) {
  assert(false, "Should not call global delete []");
}
 722 #endif // ALLOW_OPERATOR_NEW_USAGE
 723 
// Convenience wrappers printing to the default tty stream.
void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

// Default implementations just print the object's address; p2i() converts
// the pointer to intptr_t so it matches INTPTR_FORMAT in varargs.
void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}
 734 
// Cumulative bytes handed out by all arenas; updated through
// inc_bytes_allocated() below and read by AllocStats.
julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }
 738 
// Snapshot the global allocation counters at construction; the accessors
// below report deltas accumulated since that snapshot.
AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

// Deltas since this AllocStats was constructed.
julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
julong  AllocStats::num_frees()   { return os::num_frees - start_frees; }
julong  AllocStats::free_bytes()  { return os::free_bytes - start_mfree_bytes; }
julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
 752 void    AllocStats::print() {


src/share/vm/memory/allocation.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File