108 void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
109 allocation_type type, MEMFLAGS flags) throw() {
110 return (address)operator new(size, nothrow_constant, type, flags);
111 }
112
113 void ResourceObj::operator delete(void* p) {
114 assert(((ResourceObj *)p)->allocated_on_C_heap(),
115 "delete only allowed for C_HEAP objects");
116 DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
117 FreeHeap(p);
118 }
119
120 void ResourceObj::operator delete [](void* p) {
121 operator delete(p);
122 }
123
124 #ifdef ASSERT
// Record where 'res' was allocated by packing the object's own address and
// the allocation_type into _allocation_t[0] (type goes in the low alignment
// bits, and the whole word is complemented so zeroed/garbage memory is
// unlikely to look valid).
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  // Low bits must be clear so the type can occupy them.
  assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res)));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
    // The second word ties the stamp to this object's own storage so
    // is_type_set() can later confirm the stamp is genuine.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
138
// Decode the allocation_type from the low bits of the complemented word
// stored by set_allocation_type().
ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  // Complement (ignoring the type bits) must reproduce this object's address.
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}
143
// True iff operator new() stamped a consistent type into BOTH words of
// _allocation_t: the type decoded from word 0 must match the type in word 1,
// and word 1 minus the type must equal word 1's own address
// (see set_allocation_type()).
bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
149
ResourceObj::ResourceObj() { // default constructor
  // Runs on every construction path. Decide whether operator new() already
  // stamped an allocation type into _allocation_t, or whether this object
  // lives on the stack / is embedded in another object (raw memory).
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such type). Keep it since it is valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  } else {
    // Operator new() was not called.
    // Assume that it is embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}
173
ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  // Copies are only expected on the stack or embedded: if _allocation_t[0]
  // happens to encode 'this', it must not also carry a consistent
  // verification word (which would indicate an operator new() stamp).
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}
183
ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  // Assignment must not disturb the target's allocation bookkeeping, so it
  // is only permitted into a stack-allocated object; 'r' is deliberately
  // unused here (subclass assignments copy the payload).
  assert(allocated_on_stack(),
         err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  // Keep current _allocation_t value;
  return *this;
}
192
ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    // Poison the type word so use-after-destruction trips the asserts above.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
199 #endif // ASSERT
200
201
202 void trace_heap_malloc(size_t size, const char* name, void* p) {
203 // A lock is not needed here - tty uses a lock internally
204 tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
205 }
206
207
208 void trace_heap_free(void* p) {
// Update the arena's cached total size and report the change to the
// native memory tracker.
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    // NOTE(review): the cast to long can truncate on LLP64 targets
    // (Windows 64-bit) for deltas beyond 2GB — presumably arena growth
    // steps stay far below that; confirm against MemTracker's contract.
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}
523
524 // Total of all Chunks in arena
525 size_t Arena::used() const {
526 size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
527 register Chunk *k = _first;
528 while( k != _chunk) { // Whilst have Chunks in a row
529 sum += k->length(); // Total size of this Chunk
530 k = k->next(); // Bump along to next Chunk
531 }
532 return sum; // Return total consumed space.
533 }
534
// Report an arena allocation failure of 'sz' bytes and abort the VM.
// NOTE(review): 'whence' is passed straight through as the message; in this
// chunk's API that third argument appears to be a plain message string, not
// a printf format — confirm against vm_exit_out_of_memory's declaration.
void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
}
538
539 // Grow a new Chunk
540 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
541 // Get minimal required size. Either real big, or even bigger for giant objs
542 size_t len = MAX2(x, (size_t) Chunk::size);
543
544 Chunk *k = _chunk; // Get filled-up chunk address
545 _chunk = new (alloc_failmode, len) Chunk(len);
546
547 if (_chunk == NULL) {
548 _chunk = k; // restore the previous value of _chunk
549 return NULL;
550 }
551 if (k) k->set_next(_chunk); // Append new chunk to end of linked list
552 else _first = _chunk;
553 _hwm = _chunk->bottom(); // Save the cached hwm, max
554 _max = _chunk->top();
555 set_size_in_bytes(size_in_bytes() + len);
556 void* result = _hwm;
|
108 void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
109 allocation_type type, MEMFLAGS flags) throw() {
110 return (address)operator new(size, nothrow_constant, type, flags);
111 }
112
113 void ResourceObj::operator delete(void* p) {
114 assert(((ResourceObj *)p)->allocated_on_C_heap(),
115 "delete only allowed for C_HEAP objects");
116 DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
117 FreeHeap(p);
118 }
119
120 void ResourceObj::operator delete [](void* p) {
121 operator delete(p);
122 }
123
124 #ifdef ASSERT
// Record where 'res' was allocated by packing the object's own address and
// the allocation_type into _allocation_t[0] (type goes in the low alignment
// bits, and the whole word is complemented so zeroed/garbage memory is
// unlikely to look valid).
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  // Low bits must be clear so the type can occupy them.
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
    // The second word ties the stamp to this object's own storage so
    // is_type_set() can later confirm the stamp is genuine.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
138
// Decode the allocation_type from the low bits of the complemented word
// stored by set_allocation_type().
ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  // Complement (ignoring the type bits) must reproduce this object's address.
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}
143
// True iff operator new() stamped a consistent type into BOTH words of
// _allocation_t: the type decoded from word 0 must match the type in word 1,
// and word 1 minus the type must equal word 1's own address
// (see set_allocation_type()).
bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
149
ResourceObj::ResourceObj() { // default constructor
  // Runs on every construction path. Decide whether operator new() already
  // stamped an allocation type into _allocation_t, or whether this object
  // lives on the stack / is embedded in another object (raw memory).
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such type). Keep it since it is valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}
173
ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  // Copies are only expected on the stack or embedded: if _allocation_t[0]
  // happens to encode 'this', it must not also carry a consistent
  // verification word (which would indicate an operator new() stamp).
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         "embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}
183
ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  // Assignment must not disturb the target's allocation bookkeeping, so it
  // is only permitted into a stack-allocated object; 'r' is deliberately
  // unused here (subclass assignments copy the payload).
  assert(allocated_on_stack(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep current _allocation_t value;
  return *this;
}
192
ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    // Poison the type word so use-after-destruction trips the asserts above.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
199 #endif // ASSERT
200
201
202 void trace_heap_malloc(size_t size, const char* name, void* p) {
203 // A lock is not needed here - tty uses a lock internally
204 tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
205 }
206
207
208 void trace_heap_free(void* p) {
// Update the arena's cached total size and report the change to the
// native memory tracker.
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    // NOTE(review): the cast to long can truncate on LLP64 targets
    // (Windows 64-bit) for deltas beyond 2GB — presumably arena growth
    // steps stay far below that; confirm against MemTracker's contract.
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}
523
524 // Total of all Chunks in arena
525 size_t Arena::used() const {
526 size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
527 register Chunk *k = _first;
528 while( k != _chunk) { // Whilst have Chunks in a row
529 sum += k->length(); // Total size of this Chunk
530 k = k->next(); // Bump along to next Chunk
531 }
532 return sum; // Return total consumed space.
533 }
534
// Report an arena allocation failure of 'sz' bytes and abort the VM.
// 'whence' is forwarded through an explicit "%s" format so a caller-supplied
// string can never be misinterpreted as printf directives.
void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, "%s", whence);
}
538
539 // Grow a new Chunk
540 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
541 // Get minimal required size. Either real big, or even bigger for giant objs
542 size_t len = MAX2(x, (size_t) Chunk::size);
543
544 Chunk *k = _chunk; // Get filled-up chunk address
545 _chunk = new (alloc_failmode, len) Chunk(len);
546
547 if (_chunk == NULL) {
548 _chunk = k; // restore the previous value of _chunk
549 return NULL;
550 }
551 if (k) k->set_next(_chunk); // Append new chunk to end of linked list
552 else _first = _chunk;
553 _hwm = _chunk->bottom(); // Save the cached hwm, max
554 _max = _chunk->top();
555 set_size_in_bytes(size_in_bytes() + len);
556 void* result = _hwm;
|