--- old/src/share/vm/memory/allocation.cpp
+++ new/src/share/vm/memory/allocation.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "memory/allocation.hpp"
27 27 #include "memory/allocation.inline.hpp"
28 28 #include "memory/genCollectedHeap.hpp"
29 29 #include "memory/metaspaceShared.hpp"
30 30 #include "memory/resourceArea.hpp"
31 31 #include "memory/universe.hpp"
32 32 #include "runtime/atomic.hpp"
33 33 #include "runtime/os.hpp"
34 34 #include "runtime/task.hpp"
35 35 #include "runtime/threadCritical.hpp"
36 36 #include "services/memTracker.hpp"
37 37 #include "utilities/ostream.hpp"
38 38
39 39 #ifdef TARGET_OS_FAMILY_linux
40 40 # include "os_linux.inline.hpp"
41 41 #endif
42 42 #ifdef TARGET_OS_FAMILY_solaris
43 43 # include "os_solaris.inline.hpp"
44 44 #endif
45 45 #ifdef TARGET_OS_FAMILY_windows
46 46 # include "os_windows.inline.hpp"
47 47 #endif
48 48 #ifdef TARGET_OS_FAMILY_aix
49 49 # include "os_aix.inline.hpp"
50 50 #endif
51 51 #ifdef TARGET_OS_FAMILY_bsd
52 52 # include "os_bsd.inline.hpp"
53 53 #endif
54 54
55 55 void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
56 56 void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
57 57 void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
58 58 void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
59 59
60 60 void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
61 61 void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
62 62 void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
63 63 void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
64 64
65 65 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
66 66 size_t word_size, bool read_only,
67 67 MetaspaceObj::Type type, TRAPS) throw() {
68 68 // Klass has its own operator new
69 69 return Metaspace::allocate(loader_data, word_size, read_only,
70 70 type, CHECK_NULL);
71 71 }
72 72
73 73 bool MetaspaceObj::is_shared() const {
74 74 return MetaspaceShared::is_in_shared_space(this);
75 75 }
76 76
77 77 bool MetaspaceObj::is_metaspace_object() const {
78 78 return Metaspace::contains((void*)this);
79 79 }
80 80
81 81 void MetaspaceObj::print_address_on(outputStream* st) const {
82 82 st->print(" {" INTPTR_FORMAT "}", p2i(this));
83 83 }
84 84
85 85 void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
86 86 address res;
87 87 switch (type) {
88 88 case C_HEAP:
89 89 res = (address)AllocateHeap(size, flags, CALLER_PC);
90 90 DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
91 91 break;
92 92 case RESOURCE_AREA:
93 93 // new(size) sets allocation type RESOURCE_AREA.
94 94 res = (address)operator new(size);
95 95 break;
96 96 default:
97 97 ShouldNotReachHere();
98 98 }
99 99 return res;
100 100 }
101 101
102 102 void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
103 103 return (address) operator new(size, type, flags);
104 104 }
105 105
106 106 void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
107 107 allocation_type type, MEMFLAGS flags) throw() {
108 108 // should only call this with std::nothrow; use the other operator new() otherwise
109 109 address res;
110 110 switch (type) {
111 111 case C_HEAP:
112 112 res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
113 113 DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
114 114 break;
115 115 case RESOURCE_AREA:
116 116 // new(size) sets allocation type RESOURCE_AREA.
117 117 res = (address)operator new(size, std::nothrow);
118 118 break;
119 119 default:
120 120 ShouldNotReachHere();
121 121 }
122 122 return res;
123 123 }
124 124
125 125 void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
126 126 allocation_type type, MEMFLAGS flags) throw() {
127 127 return (address)operator new(size, nothrow_constant, type, flags);
128 128 }
129 129
130 130 void ResourceObj::operator delete(void* p) {
131 131 assert(((ResourceObj *)p)->allocated_on_C_heap(),
132 132 "delete only allowed for C_HEAP objects");
133 133 DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
134 134 FreeHeap(p);
135 135 }
136 136
137 137 void ResourceObj::operator delete [](void* p) {
138 138 operator delete(p);
139 139 }
140 140
141 141 #ifdef ASSERT
142 142 void ResourceObj::set_allocation_type(address res, allocation_type type) {
143 143 // Set allocation type in the resource object
144 144 uintptr_t allocation = (uintptr_t)res;
145 145 assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res)));
146 146 assert(type <= allocation_mask, "incorrect allocation type");
147 147 ResourceObj* resobj = (ResourceObj *)res;
148 148 resobj->_allocation_t[0] = ~(allocation + type);
149 149 if (type != STACK_OR_EMBEDDED) {
150 150 // Called from operator new() and CollectionSetChooser(),
151 151 // set verification value.
152 152 resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
153 153 }
154 154 }
155 155
156 156 ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
157 157 assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
158 158 return (allocation_type)((~_allocation_t[0]) & allocation_mask);
159 159 }
160 160
161 161 bool ResourceObj::is_type_set() const {
162 162 allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
163 163 return get_allocation_type() == type &&
164 164 (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
165 165 }
166 166
167 167 ResourceObj::ResourceObj() { // default constructor
168 168 if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
169 169 // Operator new() is not called for allocations
170 170 // on stack and for embedded objects.
171 171 set_allocation_type((address)this, STACK_OR_EMBEDDED);
172 172 } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
173 173 // For some reason we got a value which resembles
174 174 // an embedded or stack object (operator new() does not
175 175 // set such type). Keep it since it is a valid value
176 176 // (even if it was garbage).
177 177 // Ignore garbage in other fields.
178 178 } else if (is_type_set()) {
179 179 // Operator new() was called and type was set.
180 180 assert(!allocated_on_stack(),
181 181 err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
182 182 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
183 183 } else {
184 184 // Operator new() was not called.
185 185 // Assume that it is embedded or stack object.
186 186 set_allocation_type((address)this, STACK_OR_EMBEDDED);
187 187 }
188 188 _allocation_t[1] = 0; // Zap verification value
189 189 }
190 190
191 191 ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
192 192 // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
193 193 // Note: garbage may resemble a valid value.
194 194 assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
195 195 err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
196 196 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
197 197 set_allocation_type((address)this, STACK_OR_EMBEDDED);
198 198 _allocation_t[1] = 0; // Zap verification value
199 199 }
200 200
201 201 ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
202 202 // Used in InlineTree::ok_to_inline() for WarmCallInfo.
203 203 assert(allocated_on_stack(),
204 204 err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
205 205 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
206 206 // Keep current _allocation_t value;
207 207 return *this;
208 208 }
209 209
210 210 ResourceObj::~ResourceObj() {
211 211 // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
212 212 if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
213 213 _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
214 214 }
215 215 }
216 216 #endif // ASSERT
217 217
218 218
219 219 void trace_heap_malloc(size_t size, const char* name, void* p) {
220 220 // A lock is not needed here - tty uses a lock internally
221 221 tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
222 222 }
223 223
224 224
225 225 void trace_heap_free(void* p) {
226 226 // A lock is not needed here - tty uses a lock internally
227 227 tty->print_cr("Heap free " INTPTR_FORMAT, p2i(p));
228 228 }
229 229
230 230 //--------------------------------------------------------------------------------------
231 231 // ChunkPool implementation
232 232
233 233 // MT-safe pool of chunks to reduce malloc/free thrashing
234 234 // NB: not using Mutex because pools are used before Threads are initialized
235 235 class ChunkPool: public CHeapObj<mtInternal> {
236 236 Chunk* _first; // first cached Chunk; its first word points to next chunk
237 237 size_t _num_chunks; // number of unused chunks in pool
238 238 size_t _num_used; // number of chunks currently checked out
239 239 const size_t _size; // size of each chunk (must be uniform)
240 240
241 241 // Our four static pools
242 242 static ChunkPool* _large_pool;
243 243 static ChunkPool* _medium_pool;
244 244 static ChunkPool* _small_pool;
245 245 static ChunkPool* _tiny_pool;
246 246
247 247 // return first element or null
248 248 void* get_first() {
249 249 Chunk* c = _first;
250 250 if (_first) {
251 251 _first = _first->next();
252 252 _num_chunks--;
253 253 }
254 254 return c;
255 255 }
256 256
257 257 public:
258 258 // All chunks in a ChunkPool have the same size
259 259 ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
260 260
261 261 // Allocate a new chunk from the pool (might expand the pool)
262 262 _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
263 263 assert(bytes == _size, "bad size");
264 264 void* p = NULL;
265 265 // No VM lock can be taken inside ThreadCritical lock, so os::malloc
266 266 // should be done outside ThreadCritical lock due to NMT
267 267 { ThreadCritical tc;
268 268 _num_used++;
269 269 p = get_first();
270 270 }
271 271 if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
272 272 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
273 273 vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
274 274 }
275 275 return p;
276 276 }
277 277
278 278 // Return a chunk to the pool
279 279 void free(Chunk* chunk) {
280 280 assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
281 281 ThreadCritical tc;
282 282 _num_used--;
283 283
284 284 // Add chunk to list
285 285 chunk->set_next(_first);
286 286 _first = chunk;
287 287 _num_chunks++;
288 288 }
289 289
290 290 // Prune the pool
291 291 void free_all_but(size_t n) {
292 292 Chunk* cur = NULL;
293 293 Chunk* next;
294 294 {
295 295 // if we have more than n chunks, free the excess
296 296 ThreadCritical tc;
297 297 if (_num_chunks > n) {
298 298 // free chunks at end of queue, for better locality
299 299 cur = _first;
300 300 for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
301 301
302 302 if (cur != NULL) {
303 303 next = cur->next();
304 304 cur->set_next(NULL);
305 305 cur = next;
306 306
307 307 _num_chunks = n;
308 308 }
309 309 }
310 310 }
311 311
312 312 // Free all remaining chunks, outside of ThreadCritical
313 313 // to avoid deadlock with NMT
314 314 while(cur != NULL) {
315 315 next = cur->next();
316 316 os::free(cur, mtChunk);
317 317 cur = next;
318 318 }
319 319 }
320 320
321 321 // Accessors to the preallocated pools
322 322 static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; }
323 323 static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
324 324 static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; }
325 325 static ChunkPool* tiny_pool() { assert(_tiny_pool != NULL, "must be initialized"); return _tiny_pool; }
326 326
327 327 static void initialize() {
328 328 _large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size());
329 329 _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
330 330 _small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size());
331 331 _tiny_pool = new ChunkPool(Chunk::tiny_size + Chunk::aligned_overhead_size());
332 332 }
333 333
334 334 static void clean() {
335 335 enum { BlocksToKeep = 5 };
336 336 _tiny_pool->free_all_but(BlocksToKeep);
337 337 _small_pool->free_all_but(BlocksToKeep);
338 338 _medium_pool->free_all_but(BlocksToKeep);
339 339 _large_pool->free_all_but(BlocksToKeep);
340 340 }
341 341 };
342 342
343 343 ChunkPool* ChunkPool::_large_pool = NULL;
344 344 ChunkPool* ChunkPool::_medium_pool = NULL;
345 345 ChunkPool* ChunkPool::_small_pool = NULL;
346 346 ChunkPool* ChunkPool::_tiny_pool = NULL;
347 347
348 348 void chunkpool_init() {
349 349 ChunkPool::initialize();
350 350 }
351 351
352 352 void
353 353 Chunk::clean_chunk_pool() {
354 354 ChunkPool::clean();
355 355 }
356 356
357 357
358 358 //--------------------------------------------------------------------------------------
359 359 // ChunkPoolCleaner implementation
360 360 //
361 361
362 362 class ChunkPoolCleaner : public PeriodicTask {
363 363 enum { CleaningInterval = 5000 }; // cleaning interval in ms
364 364
365 365 public:
366 366 ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
367 367 void task() {
368 368 ChunkPool::clean();
369 369 }
370 370 };
371 371
372 372 //--------------------------------------------------------------------------------------
373 373 // Chunk implementation
374 374
375 375 void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
376 376 // requested_size is equal to sizeof(Chunk) but in order for the arena
377 377 // allocations to come out aligned as expected the size must be aligned
378 378 // to expected arena alignment.
379 379 // We expect requested_size to equal sizeof(Chunk), but if it isn't the proper size we must align it.
380 380 assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
381 381 size_t bytes = ARENA_ALIGN(requested_size) + length;
382 382 switch (length) {
383 383 case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
384 384 case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
385 385 case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
386 386 case Chunk::tiny_size: return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
387 387 default: {
388 388 void* p = os::malloc(bytes, mtChunk, CALLER_PC);
389 389 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
390 390 vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
391 391 }
392 392 return p;
393 393 }
394 394 }
395 395 }
396 396
397 397 void Chunk::operator delete(void* p) {
398 398 Chunk* c = (Chunk*)p;
399 399 switch (c->length()) {
400 400 case Chunk::size: ChunkPool::large_pool()->free(c); break;
401 401 case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
402 402 case Chunk::init_size: ChunkPool::small_pool()->free(c); break;
403 403 case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break;
404 404 default: os::free(c, mtChunk);
405 405 }
406 406 }
407 407
408 408 Chunk::Chunk(size_t length) : _len(length) {
409 409 _next = NULL; // Chain on the linked list
410 410 }
411 411
412 412
413 413 void Chunk::chop() {
414 414 Chunk *k = this;
415 415 while( k ) {
416 416 Chunk *tmp = k->next();
417 417 // clear out this chunk (to detect allocation bugs)
418 418 if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
419 419 delete k; // Free chunk (was malloc'd)
420 420 k = tmp;
421 421 }
422 422 }
423 423
424 424 void Chunk::next_chop() {
425 425 _next->chop();
426 426 _next = NULL;
427 427 }
428 428
429 429
430 430 void Chunk::start_chunk_pool_cleaner_task() {
431 431 #ifdef ASSERT
432 432 static bool task_created = false;
433 433 assert(!task_created, "should not start chunk pool cleaner twice");
434 434 task_created = true;
435 435 #endif
436 436 ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
437 437 cleaner->enroll();
438 438 }
439 439
440 440 //------------------------------Arena------------------------------------------
441 441 NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
442 442
443 443 Arena::Arena(size_t init_size) {
444 444 size_t round_size = (sizeof (char *)) - 1;
445 445 init_size = (init_size+round_size) & ~round_size;
446 446 _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
447 447 _hwm = _chunk->bottom(); // Save the cached hwm, max
448 448 _max = _chunk->top();
449 449 _size_in_bytes = 0;
450 450 set_size_in_bytes(init_size);
451 451 NOT_PRODUCT(Atomic::inc(&_instance_count);)
452 452 }
453 453
454 454 Arena::Arena() {
455 455 _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
456 456 _hwm = _chunk->bottom(); // Save the cached hwm, max
457 457 _max = _chunk->top();
458 458 _size_in_bytes = 0;
459 459 set_size_in_bytes(Chunk::init_size);
460 460 NOT_PRODUCT(Atomic::inc(&_instance_count);)
461 461 }
462 462
463 463 Arena *Arena::move_contents(Arena *copy) {
464 464 copy->destruct_contents();
465 465 copy->_chunk = _chunk;
466 466 copy->_hwm = _hwm;
467 467 copy->_max = _max;
468 468 copy->_first = _first;
469 469
470 470 // work around a rare race condition in which native memory tracking
471 471 // could double-count the arena size
472 472 size_t size = size_in_bytes();
473 473 set_size_in_bytes(0);
474 474 copy->set_size_in_bytes(size);
475 475 // Destroy original arena
476 476 reset();
477 477 return copy; // Return Arena with contents
478 478 }
479 479
480 480 Arena::~Arena() {
481 481 destruct_contents();
482 482 NOT_PRODUCT(Atomic::dec(&_instance_count);)
483 483 }
484 484
485 485 void* Arena::operator new(size_t size) throw() {
486 486 assert(false, "Use dynamic memory type binding");
487 487 return NULL;
488 488 }
489 489
490 490 void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
491 491 assert(false, "Use dynamic memory type binding");
492 492 return NULL;
493 493 }
494 494
495 495 // dynamic memory type binding
496 496 void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
497 497 #ifdef ASSERT
498 498 void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
499 499 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
500 500 return p;
501 501 #else
502 502 return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
503 503 #endif
504 504 }
505 505
506 506 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
507 507 #ifdef ASSERT
508 508 void* p = os::malloc(size, flags|otArena, CALLER_PC);
509 509 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
510 510 return p;
511 511 #else
512 512 return os::malloc(size, flags|otArena, CALLER_PC);
513 513 #endif
514 514 }
515 515
516 516 void Arena::operator delete(void* p) {
517 517 FreeHeap(p);
518 518 }
519 519
520 520 // Destroy this arena's contents and reset to empty
521 521 void Arena::destruct_contents() {
522 522 if (UseMallocOnly && _first != NULL) {
523 523 char* end = _first->next() ? _first->top() : _hwm;
524 524 free_malloced_objects(_first, _first->bottom(), end, _hwm);
525 525 }
526 526 // reset size before chop to avoid a rare race condition
527 527 // that can have total arena memory exceed total chunk memory
528 528 set_size_in_bytes(0);
529 529 _first->chop();
530 530 reset();
531 531 }
532 532
533 533 // This is a high-traffic method, but many calls actually don't
534 534 // change the size
535 535 void Arena::set_size_in_bytes(size_t size) {
536 536 if (_size_in_bytes != size) {
537 537 _size_in_bytes = size;
538 538 MemTracker::record_arena_size((address)this, size);
539 539 }
540 540 }
541 541
542 542 // Total of all Chunks in arena
543 543 size_t Arena::used() const {
544 544 size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
545 545 register Chunk *k = _first;
546 546 while( k != _chunk) { // While we have Chunks in a row
547 547 sum += k->length(); // Total size of this Chunk
548 548 k = k->next(); // Bump along to next Chunk
549 549 }
550 550 return sum; // Return total consumed space.
551 551 }
552 552
553 553 void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
554 554 vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
555 555 }
556 556
557 557 // Grow a new Chunk
558 558 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
559 559 // Get minimal required size. Either real big, or even bigger for giant objs
560 560 size_t len = MAX2(x, (size_t) Chunk::size);
561 561
562 562 Chunk *k = _chunk; // Get filled-up chunk address
563 563 _chunk = new (alloc_failmode, len) Chunk(len);
564 564
565 565 if (_chunk == NULL) {
566 + _chunk = k; // restore the value of _chunk
566 567 return NULL;
567 568 }
568 569 if (k) k->set_next(_chunk); // Append new chunk to end of linked list
569 570 else _first = _chunk;
570 571 _hwm = _chunk->bottom(); // Save the cached hwm, max
571 572 _max = _chunk->top();
572 573 set_size_in_bytes(size_in_bytes() + len);
573 574 void* result = _hwm;
574 575 _hwm += x;
575 576 return result;
576 577 }
577 578
578 579
579 580
580 581 // Reallocate storage in Arena.
581 582 void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
582 583 assert(new_size >= 0, "bad size");
583 584 if (new_size == 0) return NULL;
584 585 #ifdef ASSERT
585 586 if (UseMallocOnly) {
586 587 // always allocate a new object (otherwise we'll free this one twice)
587 588 char* copy = (char*)Amalloc(new_size, alloc_failmode);
588 589 if (copy == NULL) {
589 590 return NULL;
590 591 }
591 592 size_t n = MIN2(old_size, new_size);
592 593 if (n > 0) memcpy(copy, old_ptr, n);
593 594 Afree(old_ptr,old_size); // Mostly done to keep stats accurate
594 595 return copy;
595 596 }
596 597 #endif
597 598 char *c_old = (char*)old_ptr; // Handy name
598 599 // Stupid fast special case
599 600 if( new_size <= old_size ) { // Shrink in-place
600 601 if( c_old+old_size == _hwm) // Attempt to free the excess bytes
601 602 _hwm = c_old+new_size; // Adjust hwm
602 603 return c_old;
603 604 }
604 605
605 606 // make sure that new_size is legal
606 607 size_t corrected_new_size = ARENA_ALIGN(new_size);
607 608
608 609 // See if we can resize in-place
609 610 if( (c_old+old_size == _hwm) && // Adjusting recent thing
610 611 (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
611 612 _hwm = c_old+corrected_new_size; // Adjust hwm
612 613 return c_old; // Return old pointer
613 614 }
614 615
615 616 // Oops, got to relocate guts
616 617 void *new_ptr = Amalloc(new_size, alloc_failmode);
617 618 if (new_ptr == NULL) {
618 619 return NULL;
619 620 }
620 621 memcpy( new_ptr, c_old, old_size );
621 622 Afree(c_old,old_size); // Mostly done to keep stats accurate
622 623 return new_ptr;
623 624 }
624 625
625 626
626 627 // Determine if pointer belongs to this Arena or not.
627 628 bool Arena::contains( const void *ptr ) const {
628 629 #ifdef ASSERT
629 630 if (UseMallocOnly) {
630 631 // really slow, but not easy to make fast
631 632 if (_chunk == NULL) return false;
632 633 char** bottom = (char**)_chunk->bottom();
633 634 for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
634 635 if (*p == ptr) return true;
635 636 }
636 637 for (Chunk *c = _first; c != NULL; c = c->next()) {
637 638 if (c == _chunk) continue; // current chunk has been processed
638 639 char** bottom = (char**)c->bottom();
639 640 for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
640 641 if (*p == ptr) return true;
641 642 }
642 643 }
643 644 return false;
644 645 }
645 646 #endif
646 647 if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
647 648 return true; // Check for in this chunk
648 649 for (Chunk *c = _first; c; c = c->next()) {
649 650 if (c == _chunk) continue; // current chunk has been processed
650 651 if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
651 652 return true; // Check for every chunk in Arena
652 653 }
653 654 }
654 655 return false; // Not in any Chunk, so not in Arena
655 656 }
656 657
657 658
658 659 #ifdef ASSERT
659 660 void* Arena::malloc(size_t size) {
660 661 assert(UseMallocOnly, "shouldn't call");
661 662 // use malloc, but save pointer in res. area for later freeing
662 663 char** save = (char**)internal_malloc_4(sizeof(char*));
663 664 return (*save = (char*)os::malloc(size, mtChunk));
664 665 }
665 666
666 667 // for debugging with UseMallocOnly
667 668 void* Arena::internal_malloc_4(size_t x) {
668 669 assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
669 670 check_for_overflow(x, "Arena::internal_malloc_4");
670 671 if (_hwm + x > _max) {
671 672 return grow(x);
672 673 } else {
673 674 char *old = _hwm;
674 675 _hwm += x;
675 676 return old;
676 677 }
677 678 }
678 679 #endif
679 680
680 681
681 682 //--------------------------------------------------------------------------------------
682 683 // Non-product code
683 684
684 685 #ifndef PRODUCT
685 686 // The global operator new should never be called since it will usually indicate
686 687 // a memory leak. Use CHeapObj as the base class of such objects to make it explicit
687 688 // that they're allocated on the C heap.
688 689 // Commented out in product version to avoid conflicts with third-party C++ native code.
689 690 //
690 691 // In C++98/03 the throwing new operators are defined with the following signature:
691 692 //
692 693 // void* operator new(std::size_t size) throw(std::bad_alloc);
693 694 // void* operator new[](std::size_t size) throw(std::bad_alloc);
694 695 //
695 696 // while all the other (non-throwing) new and delete operators are defined with an empty
696 697 // throw clause (i.e. "operator delete(void* p) throw()") which means that they do not
697 698 // throw any exceptions (see section 18.4 of the C++ standard).
698 699 //
699 700 // In the new C++11/14 standard, the signature of the throwing new operators was changed
700 701 // by completely omitting the throw clause (which effectively means they could throw any
701 702 // exception) while all the other new/delete operators were changed to have a 'nothrow'
702 703 // clause instead of an empty throw clause.
703 704 //
704 705 // Unfortunately, the support for exception specifications among C++ compilers is still
705 706 // very fragile. While stricter compilers like AIX xlC or HP aCC refuse to
706 707 // override the default throwing new operator with a user operator with an empty throw()
707 708 // clause, the MS Visual C++ compiler warns for every non-empty throw clause like
708 709 // throw(std::bad_alloc) that it will ignore the exception specification. The following
709 710 // operator definitions have been checked to correctly work with all currently supported
710 711 // compilers and they should be upwards compatible with C++11/14. Therefore
711 712 // PLEASE BE CAREFUL if you change the signature of the following operators!
712 713
713 714 void* operator new(size_t size) /* throw(std::bad_alloc) */ {
714 715 fatal("Should not call global operator new");
715 716 return 0;
716 717 }
717 718
718 719 void* operator new [](size_t size) /* throw(std::bad_alloc) */ {
719 720 fatal("Should not call global operator new[]");
720 721 return 0;
721 722 }
722 723
723 724 void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
724 725 fatal("Should not call global operator new");
725 726 return 0;
726 727 }
727 728
728 729 void* operator new [](size_t size, std::nothrow_t& nothrow_constant) throw() {
729 730 fatal("Should not call global operator new[]");
730 731 return 0;
731 732 }
732 733
733 734 void operator delete(void* p) throw() {
734 735 fatal("Should not call global delete");
735 736 }
736 737
737 738 void operator delete [](void* p) throw() {
738 739 fatal("Should not call global delete []");
739 740 }
740 741
741 742 void AllocatedObj::print() const { print_on(tty); }
742 743 void AllocatedObj::print_value() const { print_value_on(tty); }
743 744
744 745 void AllocatedObj::print_on(outputStream* st) const {
745 746 st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
746 747 }
747 748
748 749 void AllocatedObj::print_value_on(outputStream* st) const {
749 750 st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
750 751 }
751 752
752 753 julong Arena::_bytes_allocated = 0;
753 754
754 755 void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }
755 756
756 757 AllocStats::AllocStats() {
757 758 start_mallocs = os::num_mallocs;
758 759 start_frees = os::num_frees;
759 760 start_malloc_bytes = os::alloc_bytes;
760 761 start_mfree_bytes = os::free_bytes;
761 762 start_res_bytes = Arena::_bytes_allocated;
762 763 }
763 764
764 765 julong AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
765 766 julong AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
766 767 julong AllocStats::num_frees() { return os::num_frees - start_frees; }
767 768 julong AllocStats::free_bytes() { return os::free_bytes - start_mfree_bytes; }
768 769 julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
769 770 void AllocStats::print() {
770 771 tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
771 772 UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
772 773 num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
773 774 }
774 775
775 776
776 777 // debugging code
777 778 inline void Arena::free_all(char** start, char** end) {
778 779 for (char** p = start; p < end; p++) if (*p) os::free(*p);
779 780 }
780 781
781 782 void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
782 783 assert(UseMallocOnly, "should not call");
783 784 // free all objects malloced since resource mark was created; resource area
784 785 // contains their addresses
785 786 if (chunk->next()) {
786 787 // this chunk is full, and some others too
787 788 for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
788 789 char* top = c->top();
789 790 if (c->next() == NULL) {
790 791 top = hwm2; // last chunk is only used up to hwm2
791 792 assert(c->contains(hwm2), "bad hwm2");
792 793 }
793 794 free_all((char**)c->bottom(), (char**)top);
794 795 }
795 796 assert(chunk->contains(hwm), "bad hwm");
796 797 assert(chunk->contains(max), "bad max");
797 798 free_all((char**)hwm, (char**)max);
798 799 } else {
799 800 // this chunk was partially used
800 801 assert(chunk->contains(hwm), "bad hwm");
801 802 assert(chunk->contains(hwm2), "bad hwm2");
802 803 free_all((char**)hwm, (char**)hwm2);
803 804 }
804 805 }
805 806
806 807
807 808 ReallocMark::ReallocMark() {
808 809 #ifdef ASSERT
809 810 Thread *thread = ThreadLocalStorage::get_thread_slow();
810 811 _nesting = thread->resource_area()->nesting();
811 812 #endif
812 813 }
813 814
814 815 void ReallocMark::check() {
815 816 #ifdef ASSERT
816 817 if (_nesting != Thread::current()->resource_area()->nesting()) {
817 818 fatal("allocation bug: array could grow within nested ResourceMark");
818 819 }
819 820 #endif
820 821 }
821 822
822 823 #endif // Non-product