    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free the excess beyond n
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        // (note: callers pass n >= 1; with n == 0 the bound (n - 1) would wrap)
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur);
      cur = next;
    }
  }
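// Aside: the shape above (detach under the lock, free outside it) is a
// general pattern for avoiding lock-order inversion when free() may
// itself take another lock, as NMT's tracker can. A minimal standalone
// sketch of the pattern, with std::mutex standing in for ThreadCritical
// (all names below are illustrative, not HotSpot's):

#include <cstdlib>
#include <mutex>

struct Node { Node* next; };

static std::mutex pool_lock;         // stands in for ThreadCritical
static Node*      pool_head = NULL;

void drain_pool() {
  Node* doomed;
  {
    std::lock_guard<std::mutex> guard(pool_lock);  // short critical section
    doomed    = pool_head;                         // detach the whole list...
    pool_head = NULL;
  }
  while (doomed != NULL) {                         // ...then free it unlocked
    Node* next = doomed->next;
    std::free(doomed);
    doomed = next;
  }
}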

  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool; }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool; }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool; }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }
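  // Since each pool serves exactly one payload size, a chunk's pool is
  // determined by its length alone. A hypothetical helper (not part of
  // HotSpot) making the dispatch in Chunk::operator new/delete explicit:
  static ChunkPool* pool_for(size_t length) {
    switch (length) {
     case Chunk::size:        return large_pool();
     case Chunk::medium_size: return medium_pool();
     case Chunk::init_size:   return small_pool();
     case Chunk::tiny_size:   return tiny_pool();
     default:                 return NULL;  // non-standard sizes bypass the pools
    }
  }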

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
// [...] (elided in this excerpt: the remainder of class ChunkPool and
// the start of Chunk::operator new, which computes `bytes` and opens
// the switch on the requested payload length)

   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}
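// How callers see the two failure modes above: with EXIT_OOM a failed
// os::malloc terminates the VM; with RETURN_NULL the caller checks the
// result itself. A minimal sketch of the RETURN_NULL style, modeled on
// how arenas grow their chunk list (the wrapper function is illustrative):

void example_grow(size_t payload_len) {
  Chunk* c = new (AllocFailStrategy::RETURN_NULL, payload_len) Chunk(payload_len);
  if (c == NULL) {
    // allocation failed: degrade gracefully instead of exiting the VM
    return;
  }
  // ... link c into a chunk list ...
}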

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c);   break;
   default:                 os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;  // Chain on the linked list
}

void Chunk::chop() {
  Chunk* k = this;
  while (k != NULL) {
    Chunk* tmp = k->next();
    // clear out this chunk (to detect allocation bugs); memset uses
    // only the low byte of badResourceValue as the fill pattern
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;  // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();   // chop everything after this chunk...
  _next = NULL;    // ...and detach it from this one
}

//------------------------------------------------------------------------
// Revised version of the section above. The key change: chunks are freed
// while still holding ThreadCritical, so that the NMT adjustment stays
// stable, and _num_chunks is decremented as each chunk is freed.
//------------------------------------------------------------------------
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free the excess beyond n
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        // (note: callers pass n >= 1; with n == 0 the bound (n - 1) would wrap)
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          // Free all remaining chunks while in ThreadCritical lock
          // so that the NMT adjustment is stable.
          while (cur != NULL) {
            next = cur->next();
            os::free(cur);
            _num_chunks--;
            cur = next;
          }
        }
      }
    }
  }
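  // With frees done inside the critical section, the length of the _first
  // list and _num_chunks now agree whenever the lock is held. A
  // hypothetical checker (not in HotSpot) for that invariant:
  void verify_counts() {
    ThreadCritical tc;
    size_t count = 0;
    for (Chunk* c = _first; c != NULL; c = c->next()) count++;
    assert(count == _num_chunks, "chunk count out of sync with free list");
  }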

  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool; }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool; }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool; }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }
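  // Note on the sizes above: each pool's element size is the usable
  // payload length plus the Chunk header rounded up to arena alignment
  // (aligned_overhead_size()), so a chunk's payload length alone
  // identifies the pool it came from; operator delete below relies on
  // exactly that.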

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
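  // clean() has no caller in this excerpt; in the VM it is meant to be
  // driven periodically. A sketch of such a driver, assuming HotSpot's
  // PeriodicTask API (the class name and interval here are illustrative):
  class ChunkPoolCleaner : public PeriodicTask {
    enum { CleaningInterval = 5000 };  // ms between cleanings, illustrative
   public:
    ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
    void task() { ChunkPool::clean(); }
  };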
// [...] (elided in this excerpt: the remainder of class ChunkPool and
// the start of Chunk::operator new, which computes `bytes` and opens
// the switch on the requested payload length)

   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c);   break;
   default: {
     // Free chunks under TC lock so that NMT adjustment is stable.
     ThreadCritical tc;
     os::free(c);
   }
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;  // Chain on the linked list
}

void Chunk::chop() {
  Chunk* k = this;
  while (k != NULL) {
    Chunk* tmp = k->next();
    // clear out this chunk (to detect allocation bugs); memset uses
    // only the low byte of badResourceValue as the fill pattern
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;  // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();   // chop everything after this chunk...
  _next = NULL;    // ...and detach it from this one
}