
src/share/vm/memory/metaspace.cpp

rev 13180 : imported patch 8181917-refactor-ul-logstream
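
The unpatched file appears first on this page, followed by the patched version. The change is mechanical throughout: the Log(...) handle and its debug_stream()/trace_stream()/info_stream() accessors are replaced by an explicit LogTarget with a stack-allocated LogStream, and the existing print_on(outputStream*) routines are pointed at that stream. A minimal before/after sketch of the idiom, assuming HotSpot's logging/log.hpp and logging/logStream.hpp; node stands in for any object with a print_on(outputStream*) method:

    // Before this patch: fetch an outputStream* from the Log handle.
    Log(gc, metaspace) log;
    if (log.is_trace()) {
      ResourceMark rm;
      node->print_on(log.trace_stream());
    }

    // After this patch: bind a LogStream to a LogTarget on the stack.
    LogTarget(Trace, gc, metaspace) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);      // LogStream is an outputStream
      node->print_on(&ls);
    }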


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"

  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"
  46 #include "runtime/orderAccess.inline.hpp"
  47 #include "services/memTracker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "utilities/copy.hpp"


 997 
 998 size_t VirtualSpaceNode::free_words_in_vs() const {
 999   return pointer_delta(end(), top(), sizeof(MetaWord));
1000 }
1001 
1002 // Allocates the chunk from the virtual space only.
1003 // This interface is also used internally for debugging.  Not all
1004 // chunks removed here are necessarily used for allocation.
1005 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1006   // Bottom of the new chunk
1007   MetaWord* chunk_limit = top();
1008   assert(chunk_limit != NULL, "Not safe to call this method");
1009 
1010   // The virtual spaces are always expanded by the
1011   // commit granularity to enforce the following condition.
1012   // Without this the is_available check will not work correctly.
1013   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1014       "The committed memory doesn't match the expanded memory.");
1015 
1016   if (!is_available(chunk_word_size)) {
1017     Log(gc, metaspace, freelist) log;
1018     log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);


1019     // Dump some information about the virtual space that is nearly full
1020     ResourceMark rm;
1021     print_on(log.debug_stream());
1022     return NULL;
1023   }
1024 
1025   // Take the space (bump top on the current virtual space).
1026   inc_top(chunk_word_size);
1027 
1028   // Initialize the chunk
1029   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1030   return result;
1031 }
1032 
1033 
1034 // Expand the virtual space (commit more of the reserved space)
1035 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1036   size_t min_bytes = min_words * BytesPerWord;
1037   size_t preferred_bytes = preferred_words * BytesPerWord;
1038 
1039   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1040 
1041   if (uncommitted < min_bytes) {


1336     // ensure lock-free iteration sees fully initialized node
1337     OrderAccess::storestore();
1338     link_vs(new_entry);
1339     return true;
1340   }
1341 }
1342 
1343 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1344   if (virtual_space_list() == NULL) {
1345     set_virtual_space_list(new_entry);
1346   } else {
1347     current_virtual_space()->set_next(new_entry);
1348   }
1349   set_current_virtual_space(new_entry);
1350   inc_reserved_words(new_entry->reserved_words());
1351   inc_committed_words(new_entry->committed_words());
1352   inc_virtual_space_count();
1353 #ifdef ASSERT
1354   new_entry->mangle();
1355 #endif
1356   if (log_is_enabled(Trace, gc, metaspace)) {
1357     Log(gc, metaspace) log;

1358     VirtualSpaceNode* vsl = current_virtual_space();
1359     ResourceMark rm;
1360     vsl->print_on(log.trace_stream());
1361   }
1362 }
1363 
1364 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1365                                       size_t min_words,
1366                                       size_t preferred_words) {
1367   size_t before = node->committed_words();
1368 
1369   bool result = node->expand_by(min_words, preferred_words);
1370 
1371   size_t after = node->committed_words();
1372 
1373   // after and before can be the same if the memory was pre-committed.
1374   assert(after >= before, "Inconsistency");
1375   inc_committed_words(after - before);
1376 
1377   return result;
1378 }
1379 
1380 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1942 #endif
1943   chunk->container()->inc_container_count();
1944 
1945   slow_locked_verify();
1946   return chunk;
1947 }
1948 
1949 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1950   assert_lock_strong(SpaceManager::expand_lock());
1951   slow_locked_verify();
1952 
1953   // Take from the beginning of the list
1954   Metachunk* chunk = free_chunks_get(word_size);
1955   if (chunk == NULL) {
1956     return NULL;
1957   }
1958 
1959   assert((word_size <= chunk->word_size()) ||
1960          (list_index(chunk->word_size()) == HumongousIndex),
1961          "Non-humongous variable sized chunk");
1962   Log(gc, metaspace, freelist) log;
1963   if (log.is_debug()) {
1964     size_t list_count;
1965     if (list_index(word_size) < HumongousIndex) {
1966       ChunkList* list = find_free_chunks_list(word_size);
1967       list_count = list->count();
1968     } else {
1969       list_count = humongous_dictionary()->total_count();
1970     }
1971     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",

1972                p2i(this), p2i(chunk), chunk->word_size(), list_count);
1973     ResourceMark rm;
1974     locked_print_free_chunks(log.debug_stream());
1975   }
1976 
1977   return chunk;
1978 }
1979 
1980 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1981   assert_lock_strong(SpaceManager::expand_lock());
1982   assert(chunk != NULL, "Expected chunk.");
1983   assert(chunk->container() != NULL, "Container should have been set.");
1984   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1985   index_bounds_check(index);
1986 
1987   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1988   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1989   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1990   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1991 
1992   if (index != HumongousIndex) {
1993     // Return non-humongous chunk to freelist.
1994     ChunkList* list = free_chunks(index);


2381 }
2382 
2383 SpaceManager::~SpaceManager() {
2384   // These calls take this->_lock, which can't be done while holding expand_lock()
2385   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2386          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2387          " allocated_chunks_words() " SIZE_FORMAT,
2388          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2389 
2390   MutexLockerEx fcl(SpaceManager::expand_lock(),
2391                     Mutex::_no_safepoint_check_flag);
2392 
2393   chunk_manager()->slow_locked_verify();
2394 
2395   dec_total_from_size_metrics();
2396 
2397   Log(gc, metaspace, freelist) log;
2398   if (log.is_trace()) {
2399     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2400     ResourceMark rm;
2401     locked_print_chunks_in_use_on(log.trace_stream());

2402     if (block_freelists() != NULL) {
2403     block_freelists()->print_on(log.trace_stream());
2404   }
2405   }
2406 
2407   // Add all the chunks in use by this space manager
2408   // to the global list of free chunks.
2409 
2410   // Follow each list of chunks-in-use and add them to the
2411   // free lists.  Each list is NULL terminated.
2412 
2413   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2414     Metachunk* chunks = chunks_in_use(i);
2415     chunk_manager()->return_chunk_list(i, chunks);
2416     set_chunks_in_use(i, NULL);
2417   }
2418 
2419   chunk_manager()->slow_locked_verify();
2420 
2421   if (_block_freelists != NULL) {
2422     delete _block_freelists;
2423   }


2460       set_current_chunk(new_chunk);
2461     }
2462     // Link at head.  _current_chunk points to a humongous chunk only for
2463     // the null class loader metaspace (the class and data virtual space
2464     // managers); since humongous chunks are linked at the head, it will
2465     // not point to the tail of the humongous chunks list.
2466     new_chunk->set_next(chunks_in_use(HumongousIndex));
2467     set_chunks_in_use(HumongousIndex, new_chunk);
2468 
2469     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2470   }
2471 
2472   // Add to the running sum of capacity
2473   inc_size_metrics(new_chunk->word_size());
2474 
2475   assert(new_chunk->is_empty(), "Not ready for reuse");
2476   Log(gc, metaspace, freelist) log;
2477   if (log.is_trace()) {
2478     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2479     ResourceMark rm;
2480     outputStream* out = log.trace_stream();
2481     new_chunk->print_on(out);
2482     chunk_manager()->locked_print_free_chunks(out);
2483   }
2484 }
2485 
2486 void SpaceManager::retire_current_chunk() {
2487   if (current_chunk() != NULL) {
2488     size_t remaining_words = current_chunk()->free_word_size();
2489     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2490       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2491       deallocate(ptr, remaining_words);
2492       inc_used_metrics(remaining_words);
2493     }
2494   }
2495 }
2496 
2497 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2498   // Get a chunk from the chunk freelist
2499   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2500 
2501   if (next == NULL) {
2502     next = vs_list()->get_new_chunk(chunk_word_size,


3192                                               compressed_class_space_size()));
3193       }
3194     }
3195   }
3196 
3197   // If we got here then the metaspace got allocated.
3198   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3199 
3200 #if INCLUDE_CDS
3201   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3202   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3203     FileMapInfo::stop_sharing_and_unmap(
3204         "Could not allocate metaspace at a compatible address");
3205   }
3206 #endif
3207   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3208                                   UseSharedSpaces ? (address)cds_base : 0);
3209 
3210   initialize_class_space(metaspace_rs);
3211 
3212   if (log_is_enabled(Trace, gc, metaspace)) {
3213     Log(gc, metaspace) log;
3214     ResourceMark rm;
3215     print_compressed_class_space(log.trace_stream(), requested_addr);

3216   }
3217 }
3218 
3219 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3220   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3221                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3222   if (_class_space_list != NULL) {
3223     address base = (address)_class_space_list->current_virtual_space()->bottom();
3224     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3225                  compressed_class_space_size(), p2i(base));
3226     if (requested_addr != 0) {
3227       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3228     }
3229     st->cr();
3230   }
3231 }
3232 
3233 // For UseCompressedClassPointers the class space is reserved above the top of
3234 // the Java heap.  The argument passed in is at the base of the compressed space.
3235 void Metaspace::initialize_class_space(ReservedSpace rs) {


3649   // Zero initialize.
3650   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3651 
3652   return result;
3653 }
3654 
3655 size_t Metaspace::class_chunk_size(size_t word_size) {
3656   assert(using_class_space(), "Has to use class space");
3657   return class_vsm()->calc_chunk_size(word_size);
3658 }
3659 
3660 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3661   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3662 
3663   // If result is still null, we are out of memory.
3664   Log(gc, metaspace, freelist) log;
3665   if (log.is_info()) {
3666     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3667              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3668     ResourceMark rm;
3669     outputStream* out = log.info_stream();
3670     if (loader_data->metaspace_or_null() != NULL) {
3671       loader_data->dump(out);
3672     }
3673     MetaspaceAux::dump(out);
3674   }
3675 
3676   bool out_of_compressed_class_space = false;
3677   if (is_class_space_allocation(mdtype)) {
3678     Metaspace* metaspace = loader_data->metaspace_non_null();
3679     out_of_compressed_class_space =
3680       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3681       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3682       CompressedClassSpaceSize;
3683   }
3684 
3685   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3686   const char* space_string = out_of_compressed_class_space ?
3687     "Compressed class space" : "Metaspace";
3688 
3689   report_java_out_of_memory(space_string);
3690 
3691   if (JvmtiExport::should_post_resource_exhausted()) {
3692     JvmtiExport::post_resource_exhausted(
3693         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,




  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/copy.hpp"


 998 
 999 size_t VirtualSpaceNode::free_words_in_vs() const {
1000   return pointer_delta(end(), top(), sizeof(MetaWord));
1001 }
1002 
1003 // Allocates the chunk from the virtual space only.
1004 // This interface is also used internally for debugging.  Not all
1005 // chunks removed here are necessarily used for allocation.
1006 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1007   // Bottom of the new chunk
1008   MetaWord* chunk_limit = top();
1009   assert(chunk_limit != NULL, "Not safe to call this method");
1010 
1011   // The virtual spaces are always expanded by the
1012   // commit granularity to enforce the following condition.
1013   // Without this the is_available check will not work correctly.
1014   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1015       "The committed memory doesn't match the expanded memory.");
1016 
1017   if (!is_available(chunk_word_size)) {
1018     LogTarget(Debug, gc, metaspace, freelist) lt;
1019     if (lt.is_enabled()) {
1020       LogStream ls(lt);
1021       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1022       // Dump some information about the virtual space that is nearly full
1023       print_on(&ls);
1024     }
1025     return NULL;
1026   }
1027 
1028   // Take the space (bump top on the current virtual space).
1029   inc_top(chunk_word_size);
1030 
1031   // Initialize the chunk
1032   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1033   return result;
1034 }
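
The refactor works because LogStream derives from outputStream, so printers written against outputStream* (print_on here, locked_print_free_chunks and MetaspaceAux::dump further down this page) can write to unified logging, tty, or a stringStream alike. A hypothetical printer in the same style, to illustrate the contract (not code from this file):

    // Sketch: the printer never knows which sink it is writing to.
    static void print_words(outputStream* st, size_t used, size_t free) {
      st->print_cr("used " SIZE_FORMAT " words, free " SIZE_FORMAT " words",
                   used, free);
    }

    // Callers choose the sink:
    //   print_words(tty, used, free);                    // console
    //   LogStream ls(lt); print_words(&ls, used, free);  // -Xlog target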
1035 
1036 
1037 // Expand the virtual space (commit more of the reserved space)
1038 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1039   size_t min_bytes = min_words * BytesPerWord;
1040   size_t preferred_bytes = preferred_words * BytesPerWord;
1041 
1042   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1043 
1044   if (uncommitted < min_bytes) {


1339     // ensure lock-free iteration sees fully initialized node
1340     OrderAccess::storestore();
1341     link_vs(new_entry);
1342     return true;
1343   }
1344 }
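
The storestore barrier retained above is the usual publication pattern: every store that initializes new_entry must become visible before the store in link_vs that makes the node reachable, because readers may walk the list without taking the lock. Schematically (hypothetical field names, not the actual list code):

    VirtualSpaceNode* node = new VirtualSpaceNode(...); // plain initializing stores
    OrderAccess::storestore(); // order the init stores before the publishing store
    _head = node;              // a lock-free reader now sees a fully built node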
1345 
1346 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1347   if (virtual_space_list() == NULL) {
1348     set_virtual_space_list(new_entry);
1349   } else {
1350     current_virtual_space()->set_next(new_entry);
1351   }
1352   set_current_virtual_space(new_entry);
1353   inc_reserved_words(new_entry->reserved_words());
1354   inc_committed_words(new_entry->committed_words());
1355   inc_virtual_space_count();
1356 #ifdef ASSERT
1357   new_entry->mangle();
1358 #endif
1359   LogTarget(Trace, gc, metaspace) lt;
1360   if (lt.is_enabled()) {
1361     LogStream ls(lt);
1362     VirtualSpaceNode* vsl = current_virtual_space();
1363     ResourceMark rm;
1364     vsl->print_on(&ls);
1365   }
1366 }
1367 
1368 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1369                                       size_t min_words,
1370                                       size_t preferred_words) {
1371   size_t before = node->committed_words();
1372 
1373   bool result = node->expand_by(min_words, preferred_words);
1374 
1375   size_t after = node->committed_words();
1376 
1377   // after and before can be the same if the memory was pre-committed.
1378   assert(after >= before, "Inconsistency");
1379   inc_committed_words(after - before);
1380 
1381   return result;
1382 }
1383 
1384 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1946 #endif
1947   chunk->container()->inc_container_count();
1948 
1949   slow_locked_verify();
1950   return chunk;
1951 }
1952 
1953 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1954   assert_lock_strong(SpaceManager::expand_lock());
1955   slow_locked_verify();
1956 
1957   // Take from the beginning of the list
1958   Metachunk* chunk = free_chunks_get(word_size);
1959   if (chunk == NULL) {
1960     return NULL;
1961   }
1962 
1963   assert((word_size <= chunk->word_size()) ||
1964          (list_index(chunk->word_size()) == HumongousIndex),
1965          "Non-humongous variable sized chunk");
1966   LogTarget(Debug, gc, metaspace, freelist) lt;
1967   if (lt.is_enabled()) {
1968     size_t list_count;
1969     if (list_index(word_size) < HumongousIndex) {
1970       ChunkList* list = find_free_chunks_list(word_size);
1971       list_count = list->count();
1972     } else {
1973       list_count = humongous_dictionary()->total_count();
1974     }
1975     LogStream ls(lt);
1976     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1977              p2i(this), p2i(chunk), chunk->word_size(), list_count);
1978     ResourceMark rm;
1979     locked_print_free_chunks(&ls);
1980   }
1981 
1982   return chunk;
1983 }
1984 
1985 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1986   assert_lock_strong(SpaceManager::expand_lock());
1987   assert(chunk != NULL, "Expected chunk.");
1988   assert(chunk->container() != NULL, "Container should have been set.");
1989   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1990   index_bounds_check(index);
1991 
1992   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1993   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1994   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1995   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1996 
1997   if (index != HumongousIndex) {
1998     // Return non-humongous chunk to freelist.
1999     ChunkList* list = free_chunks(index);


2386 }
2387 
2388 SpaceManager::~SpaceManager() {
2389   // These calls take this->_lock, which can't be done while holding expand_lock()
2390   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2391          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2392          " allocated_chunks_words() " SIZE_FORMAT,
2393          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2394 
2395   MutexLockerEx fcl(SpaceManager::expand_lock(),
2396                     Mutex::_no_safepoint_check_flag);
2397 
2398   chunk_manager()->slow_locked_verify();
2399 
2400   dec_total_from_size_metrics();
2401 
2402   Log(gc, metaspace, freelist) log;
2403   if (log.is_trace()) {
2404     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2405     ResourceMark rm;
2406     LogStream ls(log.trace());
2407     locked_print_chunks_in_use_on(&ls);
2408     if (block_freelists() != NULL) {
2409       block_freelists()->print_on(&ls);
2410     }
2411   }
2412 
2413   // Add all the chunks in use by this space manager
2414   // to the global list of free chunks.
2415 
2416   // Follow each list of chunks-in-use and add them to the
2417   // free lists.  Each list is NULL terminated.
2418 
2419   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2420     Metachunk* chunks = chunks_in_use(i);
2421     chunk_manager()->return_chunk_list(i, chunks);
2422     set_chunks_in_use(i, NULL);
2423   }
2424 
2425   chunk_manager()->slow_locked_verify();
2426 
2427   if (_block_freelists != NULL) {
2428     delete _block_freelists;
2429   }


2466       set_current_chunk(new_chunk);
2467     }
2468     // Link at head.  _current_chunk points to a humongous chunk only for
2469     // the null class loader metaspace (the class and data virtual space
2470     // managers); since humongous chunks are linked at the head, it will
2471     // not point to the tail of the humongous chunks list.
2472     new_chunk->set_next(chunks_in_use(HumongousIndex));
2473     set_chunks_in_use(HumongousIndex, new_chunk);
2474 
2475     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2476   }
2477 
2478   // Add to the running sum of capacity
2479   inc_size_metrics(new_chunk->word_size());
2480 
2481   assert(new_chunk->is_empty(), "Not ready for reuse");
2482   Log(gc, metaspace, freelist) log;
2483   if (log.is_trace()) {
2484     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2485     ResourceMark rm;
2486     LogStream ls(log.trace());
2487     new_chunk->print_on(&ls);
2488     chunk_manager()->locked_print_free_chunks(&ls);
2489   }
2490 }
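
Note that the patch obtains a LogStream in two equivalent ways, both visible on this page: from a LogTarget type (take_from_committed, link_vs) and from the handle returned by a Log<> level accessor (add_chunk above, ~SpaceManager and report_metadata_oome). A side-by-side sketch:

    // (1) From a LogTarget type:
    LogTarget(Trace, gc, metaspace) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print_cr("via LogTarget");
    }

    // (2) From a Log handle's level accessor, as in add_chunk above:
    Log(gc, metaspace, freelist) log;
    if (log.is_trace()) {
      LogStream ls(log.trace());
      ls.print_cr("via Log handle");
    }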
2491 
2492 void SpaceManager::retire_current_chunk() {
2493   if (current_chunk() != NULL) {
2494     size_t remaining_words = current_chunk()->free_word_size();
2495     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2496       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2497       deallocate(ptr, remaining_words);
2498       inc_used_metrics(remaining_words);
2499     }
2500   }
2501 }
2502 
2503 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2504   // Get a chunk from the chunk freelist
2505   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2506 
2507   if (next == NULL) {
2508     next = vs_list()->get_new_chunk(chunk_word_size,


3198                                               compressed_class_space_size()));
3199       }
3200     }
3201   }
3202 
3203   // If we got here then the metaspace got allocated.
3204   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3205 
3206 #if INCLUDE_CDS
3207   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3208   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3209     FileMapInfo::stop_sharing_and_unmap(
3210         "Could not allocate metaspace at a compatible address");
3211   }
3212 #endif
3213   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3214                                   UseSharedSpaces ? (address)cds_base : 0);
3215 
3216   initialize_class_space(metaspace_rs);
3217 
3218   LogTarget(Trace, gc, metaspace) lt;
3219   if (lt.is_enabled()) {
3220     ResourceMark rm;
3221     LogStream ls(lt);
3222     print_compressed_class_space(&ls, requested_addr);
3223   }
3224 }
3225 
3226 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3227   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3228                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3229   if (_class_space_list != NULL) {
3230     address base = (address)_class_space_list->current_virtual_space()->bottom();
3231     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3232                  compressed_class_space_size(), p2i(base));
3233     if (requested_addr != 0) {
3234       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3235     }
3236     st->cr();
3237   }
3238 }
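
Because print_compressed_class_space takes a plain outputStream*, the same routine can serve the trace target above or direct console output; requested_addr may be NULL, in which case the "Req Addr" field is simply omitted (see the guard above). An illustrative call (hypothetical call site, not part of this patch):

    // e.g. from ad-hoc debugging code:
    Metaspace::print_compressed_class_space(tty, NULL);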
3239 
3240 // For UseCompressedClassPointers the class space is reserved above the top of
3241 // the Java heap.  The argument passed in is at the base of the compressed space.
3242 void Metaspace::initialize_class_space(ReservedSpace rs) {


3656   // Zero initialize.
3657   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3658 
3659   return result;
3660 }
3661 
3662 size_t Metaspace::class_chunk_size(size_t word_size) {
3663   assert(using_class_space(), "Has to use class space");
3664   return class_vsm()->calc_chunk_size(word_size);
3665 }
3666 
3667 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3668   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3669 
3670   // If result is still null, we are out of memory.
3671   Log(gc, metaspace, freelist) log;
3672   if (log.is_info()) {
3673     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3674              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3675     ResourceMark rm;
3676     LogStream ls(log.info());
3677     if (loader_data->metaspace_or_null() != NULL) {
3678       loader_data->dump(&ls);
3679     }
3680     MetaspaceAux::dump(&ls);
3681   }
3682 
3683   bool out_of_compressed_class_space = false;
3684   if (is_class_space_allocation(mdtype)) {
3685     Metaspace* metaspace = loader_data->metaspace_non_null();
3686     out_of_compressed_class_space =
3687       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3688       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3689       CompressedClassSpaceSize;
3690   }
3691 
3692   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3693   const char* space_string = out_of_compressed_class_space ?
3694     "Compressed class space" : "Metaspace";
3695 
3696   report_java_out_of_memory(space_string);
3697 
3698   if (JvmtiExport::should_post_resource_exhausted()) {
3699     JvmtiExport::post_resource_exhausted(
3700         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,

