src/share/vm/memory/metaspace.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/memory

src/share/vm/memory/metaspace.cpp

Print this page




2406   if (TraceMetadataHumongousAllocation && next != NULL &&
2407       SpaceManager::is_humongous(next->word_size())) {
2408     gclog_or_tty->print_cr("  new humongous chunk word size "
2409                            PTR_FORMAT, next->word_size());
2410   }
2411 
2412   return next;
2413 }
2414 
2415 /*
2416  * The policy is to allocate up to _small_chunk_limit small chunks
2417  * after which only medium chunks are allocated.  This is done to
2418  * reduce fragmentation.  In some cases, this can result in a lot
2419  * of small chunks being allocated to the point where it's not
2420  * possible to expand.  If this happens, there may be no medium chunks
2421  * available and OOME would be thrown.  Instead of doing that,
2422  * if the allocation request size fits in a small chunk, an attempt
2423  * will be made to allocate a small chunk.
2424  */
2425 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
2426   if (word_size + Metachunk::overhead() > small_chunk_size()) {


2427     return NULL;
2428   }
2429 
2430   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2431   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2432 
2433   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2434 
2435   MetaWord* mem = NULL;
2436 
2437   if (chunk != NULL) {
2438     // Add chunk to the in-use chunk list and do an allocation from it.
2439     // Add to this manager's list of chunks in use.
2440     add_chunk(chunk, false);
2441     mem = chunk->allocate(word_size);
2442 
2443     inc_used_metrics(word_size);
2444 
2445     // Track metaspace memory usage statistic.
2446     track_metaspace_memory_usage();
2447   }
2448 
2449   return mem;
2450 }
2451 
2452 MetaWord* SpaceManager::allocate(size_t word_size) {
2453   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2454 
2455   size_t raw_word_size = get_raw_word_size(word_size);
2456   BlockFreelist* fl =  block_freelists();
2457   MetaWord* p = NULL;
2458   // Allocation from the dictionary is expensive in the sense that
2459   // the dictionary has to be searched for a size.  Don't allocate
2460   // from the dictionary until it starts to get fat.  Is this
2461   // a reasonable policy?  Maybe an skinny dictionary is fast enough
2462   // for allocations.  Do some profiling.  JJJ
2463   if (fl->total_size() > allocation_from_dictionary_limit) {




2406   if (TraceMetadataHumongousAllocation && next != NULL &&
2407       SpaceManager::is_humongous(next->word_size())) {
2408     gclog_or_tty->print_cr("  new humongous chunk word size "
2409                            PTR_FORMAT, next->word_size());
2410   }
2411 
2412   return next;
2413 }
2414 
2415 /*
2416  * The policy is to allocate up to _small_chunk_limit small chunks
2417  * after which only medium chunks are allocated.  This is done to
2418  * reduce fragmentation.  In some cases, this can result in a lot
2419  * of small chunks being allocated to the point where it's not
2420  * possible to expand.  If this happens, there may be no medium chunks
2421  * available and OOME would be thrown.  Instead of doing that,
2422  * if the allocation request size fits in a small chunk, an attempt
2423  * will be made to allocate a small chunk.
2424  */
2425 MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
     // Convert the request to its raw (adjusted) size first, so that both
     // the fits-in-a-small-chunk check and the allocation below account
     // for the words actually consumed from the chunk.
2426   size_t raw_word_size = get_raw_word_size(word_size);
2427 
2428   if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
     // Does not fit even in a whole small chunk; caller handles failure.
2429     return NULL;
2430   }
2431 
     // Both locks are taken without safepoint checks; lock() is acquired
     // before expand_lock() — NOTE(review): presumably this matches the
     // lock ranking used elsewhere in metaspace.cpp; confirm against the
     // other expand_lock() call sites.
2432   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2433   MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
2434 
     // Try to take a small chunk off the free list; NULL if none available.
2435   Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
2436 
2437   MetaWord* mem = NULL;
2438 
2439   if (chunk != NULL) {
2440     // Add chunk to the in-use chunk list and do an allocation from it.
2441     // Add to this manager's list of chunks in use.
2442     add_chunk(chunk, false);
2443     mem = chunk->allocate(raw_word_size);
2444 
     // Account the raw size — the same amount handed out above.
2445     inc_used_metrics(raw_word_size);
2446 
2447     // Track metaspace memory usage statistic.
2448     track_metaspace_memory_usage();
2449   }
2450 
2451   return mem;
2452 }
2453 
2454 MetaWord* SpaceManager::allocate(size_t word_size) {
2455   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
2456 
2457   size_t raw_word_size = get_raw_word_size(word_size);
2458   BlockFreelist* fl =  block_freelists();
2459   MetaWord* p = NULL;
2460   // Allocation from the dictionary is expensive in the sense that
2461   // the dictionary has to be searched for a size.  Don't allocate
2462   // from the dictionary until it starts to get fat.  Is this
2463   // a reasonable policy?  Maybe an skinny dictionary is fast enough
2464   // for allocations.  Do some profiling.  JJJ
2465   if (fl->total_size() > allocation_from_dictionary_limit) {


src/share/vm/memory/metaspace.cpp
Index Unified diffs Context diffs Sdiffs Wdiffs Patch New Old Previous File Next File