src/share/vm/memory/metaspace.cpp

rev 13265 : imported patch 8181917-refactor-ul-logstream

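This page shows each affected hunk twice: first as it stood before the patch, then in its patched form further below. The core transformation is mechanical: the old Log(...) handle with its resource-allocated *_stream() calls is replaced by a LogTarget/LogStream pair, so the outputStream lives on the caller's stack. A minimal sketch of the idiom as it appears in the hunks below (print_on here stands in for any callee taking an outputStream*):

  // Before: obtain a Log handle, then pass around a resource-area stream.
  Log(gc, metaspace, freelist) log;
  if (log.is_debug()) {
    ResourceMark rm;
    print_on(log.debug_stream());    // stream is resource-allocated
  }

  // After: check the target first, then build a stack-allocated LogStream on it.
  LogTarget(Debug, gc, metaspace, freelist) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);                // stream lives on the stack
    print_on(&ls);                   // callees take the stream by address
  }

Both shapes keep the ResourceMark and perform the enabled-check before any formatting work; only the ownership and lifetime of the stream change.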

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/binaryTreeDictionary.hpp"
  32 #include "memory/filemap.hpp"
  33 #include "memory/freeList.hpp"
  34 #include "memory/metachunk.hpp"
  35 #include "memory/metaspace.hpp"
  36 #include "memory/metaspaceGCThresholdUpdater.hpp"
  37 #include "memory/metaspaceShared.hpp"
  38 #include "memory/metaspaceTracer.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/globals.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/mutex.hpp"
  46 #include "runtime/orderAccess.inline.hpp"
  47 #include "services/memTracker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "utilities/align.hpp"


 992 
 993 size_t VirtualSpaceNode::free_words_in_vs() const {
 994   return pointer_delta(end(), top(), sizeof(MetaWord));
 995 }
 996 
 997 // Allocates the chunk from the virtual space only.
 998 // This interface is also used internally for debugging.  Not all
 999 // chunks removed here are necessarily used for allocation.
1000 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1001   // Bottom of the new chunk
1002   MetaWord* chunk_limit = top();
1003   assert(chunk_limit != NULL, "Not safe to call this method");
1004 
1005   // The virtual spaces are always expanded by the
1006   // commit granularity to enforce the following condition.
1007   // Without this the is_available check will not work correctly.
1008   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1009       "The committed memory doesn't match the expanded memory.");
1010 
1011   if (!is_available(chunk_word_size)) {
1012     Log(gc, metaspace, freelist) log;
1013     log.debug("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1014     // Dump some information about the virtual space that is nearly full
1015     ResourceMark rm;
1016     print_on(log.debug_stream());
1017     return NULL;
1018   }
1019 
1020   // Take the space (bump top on the current virtual space).
1021   inc_top(chunk_word_size);
1022 
1023   // Initialize the chunk
1024   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1025   return result;
1026 }
1027 
1028 
1029 // Expand the virtual space (commit more of the reserved space)
1030 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1031   size_t min_bytes = min_words * BytesPerWord;
1032   size_t preferred_bytes = preferred_words * BytesPerWord;
1033 
1034   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1035 
1036   if (uncommitted < min_bytes) {


1331     // ensure lock-free iteration sees fully initialized node
1332     OrderAccess::storestore();
1333     link_vs(new_entry);
1334     return true;
1335   }
1336 }
1337 
1338 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1339   if (virtual_space_list() == NULL) {
1340     set_virtual_space_list(new_entry);
1341   } else {
1342     current_virtual_space()->set_next(new_entry);
1343   }
1344   set_current_virtual_space(new_entry);
1345   inc_reserved_words(new_entry->reserved_words());
1346   inc_committed_words(new_entry->committed_words());
1347   inc_virtual_space_count();
1348 #ifdef ASSERT
1349   new_entry->mangle();
1350 #endif
1351   if (log_is_enabled(Trace, gc, metaspace)) {
1352     Log(gc, metaspace) log;
1353     VirtualSpaceNode* vsl = current_virtual_space();
1354     ResourceMark rm;
1355     vsl->print_on(log.trace_stream());
1356   }
1357 }
1358 
1359 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1360                                       size_t min_words,
1361                                       size_t preferred_words) {
1362   size_t before = node->committed_words();
1363 
1364   bool result = node->expand_by(min_words, preferred_words);
1365 
1366   size_t after = node->committed_words();
1367 
1368   // after and before can be the same if the memory was pre-committed.
1369   assert(after >= before, "Inconsistency");
1370   inc_committed_words(after - before);
1371 
1372   return result;
1373 }
1374 
1375 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1937 #endif
1938   chunk->container()->inc_container_count();
1939 
1940   slow_locked_verify();
1941   return chunk;
1942 }
1943 
1944 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1945   assert_lock_strong(SpaceManager::expand_lock());
1946   slow_locked_verify();
1947 
1948   // Take from the beginning of the list
1949   Metachunk* chunk = free_chunks_get(word_size);
1950   if (chunk == NULL) {
1951     return NULL;
1952   }
1953 
1954   assert((word_size <= chunk->word_size()) ||
1955          (list_index(chunk->word_size()) == HumongousIndex),
1956          "Non-humongous variable sized chunk");
1957   Log(gc, metaspace, freelist) log;
1958   if (log.is_debug()) {
1959     size_t list_count;
1960     if (list_index(word_size) < HumongousIndex) {
1961       ChunkList* list = find_free_chunks_list(word_size);
1962       list_count = list->count();
1963     } else {
1964       list_count = humongous_dictionary()->total_count();
1965     }
1966     log.debug("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1967                p2i(this), p2i(chunk), chunk->word_size(), list_count);
1968     ResourceMark rm;
1969     locked_print_free_chunks(log.debug_stream());
1970   }
1971 
1972   return chunk;
1973 }
1974 
1975 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1976   assert_lock_strong(SpaceManager::expand_lock());
1977   assert(chunk != NULL, "Expected chunk.");
1978   assert(chunk->container() != NULL, "Container should have been set.");
1979   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1980   index_bounds_check(index);
1981 
1982   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1983   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1984   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1985   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1986 
1987   if (index != HumongousIndex) {
1988     // Return non-humongous chunk to freelist.
1989     ChunkList* list = free_chunks(index);


2376 }
2377 
2378 SpaceManager::~SpaceManager() {
2379   // This call takes this->_lock, which can't be done while holding expand_lock()
2380   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2381          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2382          " allocated_chunks_words() " SIZE_FORMAT,
2383          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2384 
2385   MutexLockerEx fcl(SpaceManager::expand_lock(),
2386                     Mutex::_no_safepoint_check_flag);
2387 
2388   chunk_manager()->slow_locked_verify();
2389 
2390   dec_total_from_size_metrics();
2391 
2392   Log(gc, metaspace, freelist) log;
2393   if (log.is_trace()) {
2394     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2395     ResourceMark rm;
2396     locked_print_chunks_in_use_on(log.trace_stream());
2397     if (block_freelists() != NULL) {
2398     block_freelists()->print_on(log.trace_stream());
2399   }
2400   }
2401 
2402   // Add all the chunks in use by this space manager
2403   // to the global list of free chunks.
2404 
2405   // Follow each list of chunks-in-use and add them to the
2406   // free lists.  Each list is NULL terminated.
2407 
2408   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2409     Metachunk* chunks = chunks_in_use(i);
2410     chunk_manager()->return_chunk_list(i, chunks);
2411     set_chunks_in_use(i, NULL);
2412   }
2413 
2414   chunk_manager()->slow_locked_verify();
2415 
2416   if (_block_freelists != NULL) {
2417     delete _block_freelists;
2418   }


2455       set_current_chunk(new_chunk);
2456     }
2457     // Link at head.  The _current_chunk only points to a humongous chunk for
2458     // the null class loader metaspace (class and data virtual space managers).
2459     // New humongous chunks are linked at the head, so _current_chunk will not
2460     // point to the tail of the humongous chunks list.
2461     new_chunk->set_next(chunks_in_use(HumongousIndex));
2462     set_chunks_in_use(HumongousIndex, new_chunk);
2463 
2464     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2465   }
2466 
2467   // Add to the running sum of capacity
2468   inc_size_metrics(new_chunk->word_size());
2469 
2470   assert(new_chunk->is_empty(), "Not ready for reuse");
2471   Log(gc, metaspace, freelist) log;
2472   if (log.is_trace()) {
2473     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2474     ResourceMark rm;
2475     outputStream* out = log.trace_stream();
2476     new_chunk->print_on(out);
2477     chunk_manager()->locked_print_free_chunks(out);
2478   }
2479 }
2480 
2481 void SpaceManager::retire_current_chunk() {
2482   if (current_chunk() != NULL) {
2483     size_t remaining_words = current_chunk()->free_word_size();
2484     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2485       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2486       deallocate(ptr, remaining_words);
2487       inc_used_metrics(remaining_words);
2488     }
2489   }
2490 }
2491 
2492 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2493   // Get a chunk from the chunk freelist
2494   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2495 
2496   if (next == NULL) {
2497     next = vs_list()->get_new_chunk(chunk_word_size,


3187                                               compressed_class_space_size()));
3188       }
3189     }
3190   }
3191 
3192   // If we got here then the metaspace got allocated.
3193   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3194 
3195 #if INCLUDE_CDS
3196   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3197   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3198     FileMapInfo::stop_sharing_and_unmap(
3199         "Could not allocate metaspace at a compatible address");
3200   }
3201 #endif
3202   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3203                                   UseSharedSpaces ? (address)cds_base : 0);
3204 
3205   initialize_class_space(metaspace_rs);
3206 
3207   if (log_is_enabled(Trace, gc, metaspace)) {
3208     Log(gc, metaspace) log;
3209     ResourceMark rm;
3210     print_compressed_class_space(log.trace_stream(), requested_addr);
3211   }
3212 }
3213 
3214 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3215   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3216                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3217   if (_class_space_list != NULL) {
3218     address base = (address)_class_space_list->current_virtual_space()->bottom();
3219     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3220                  compressed_class_space_size(), p2i(base));
3221     if (requested_addr != 0) {
3222       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3223     }
3224     st->cr();
3225   }
3226 }
3227 
3228 // For UseCompressedClassPointers the class space is reserved above the top of
3229 // the Java heap.  The argument passed in is at the base of the compressed space.
3230 void Metaspace::initialize_class_space(ReservedSpace rs) {


3644   // Zero initialize.
3645   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3646 
3647   return result;
3648 }
3649 
3650 size_t Metaspace::class_chunk_size(size_t word_size) {
3651   assert(using_class_space(), "Has to use class space");
3652   return class_vsm()->calc_chunk_size(word_size);
3653 }
3654 
3655 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3656   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3657 
3658   // If result is still null, we are out of memory.
3659   Log(gc, metaspace, freelist) log;
3660   if (log.is_info()) {
3661     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3662              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3663     ResourceMark rm;
3664     outputStream* out = log.info_stream();
3665     if (loader_data->metaspace_or_null() != NULL) {
3666       loader_data->dump(out);
3667     }
3668     MetaspaceAux::dump(out);
3669   }
3670 
3671   bool out_of_compressed_class_space = false;
3672   if (is_class_space_allocation(mdtype)) {
3673     Metaspace* metaspace = loader_data->metaspace_non_null();
3674     out_of_compressed_class_space =
3675       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3676       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3677       CompressedClassSpaceSize;
3678   }
3679 
3680   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3681   const char* space_string = out_of_compressed_class_space ?
3682     "Compressed class space" : "Metaspace";
3683 
3684   report_java_out_of_memory(space_string);
3685 
3686   if (JvmtiExport::should_post_resource_exhausted()) {
3687     JvmtiExport::post_resource_exhausted(
3688         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,


src/share/vm/memory/metaspace.cpp (patched: rev 13265)

  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "aot/aotLoader.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectorPolicy.hpp"
  28 #include "gc/shared/gcLocker.hpp"
  29 #include "logging/log.hpp"
  30 #include "logging/logStream.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "memory/binaryTreeDictionary.hpp"
  33 #include "memory/filemap.hpp"
  34 #include "memory/freeList.hpp"
  35 #include "memory/metachunk.hpp"
  36 #include "memory/metaspace.hpp"
  37 #include "memory/metaspaceGCThresholdUpdater.hpp"
  38 #include "memory/metaspaceShared.hpp"
  39 #include "memory/metaspaceTracer.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "memory/universe.hpp"
  42 #include "runtime/atomic.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/init.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutex.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "services/memTracker.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "utilities/align.hpp"


 993 
 994 size_t VirtualSpaceNode::free_words_in_vs() const {
 995   return pointer_delta(end(), top(), sizeof(MetaWord));
 996 }
 997 
 998 // Allocates the chunk from the virtual space only.
 999 // This interface is also used internally for debugging.  Not all
1000 // chunks removed here are necessarily used for allocation.
1001 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
1002   // Bottom of the new chunk
1003   MetaWord* chunk_limit = top();
1004   assert(chunk_limit != NULL, "Not safe to call this method");
1005 
1006   // The virtual spaces are always expanded by the
1007   // commit granularity to enforce the following condition.
1008   // Without this the is_available check will not work correctly.
1009   assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
1010       "The committed memory doesn't match the expanded memory.");
1011 
1012   if (!is_available(chunk_word_size)) {
1013     LogTarget(Debug, gc, metaspace, freelist) lt;
1014     if (lt.is_enabled()) {
1015       LogStream ls(lt);
1016       ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
1017       // Dump some information about the virtual space that is nearly full
1018       print_on(&ls);
1019     }
1020     return NULL;
1021   }
1022 
1023   // Take the space (bump top on the current virtual space).
1024   inc_top(chunk_word_size);
1025 
1026   // Initialize the chunk
1027   Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
1028   return result;
1029 }
1030 
1031 
1032 // Expand the virtual space (commit more of the reserved space)
1033 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
1034   size_t min_bytes = min_words * BytesPerWord;
1035   size_t preferred_bytes = preferred_words * BytesPerWord;
1036 
1037   size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
1038 
1039   if (uncommitted < min_bytes) {


1334     // ensure lock-free iteration sees fully initialized node
1335     OrderAccess::storestore();
1336     link_vs(new_entry);
1337     return true;
1338   }
1339 }
1340 
1341 void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
1342   if (virtual_space_list() == NULL) {
1343     set_virtual_space_list(new_entry);
1344   } else {
1345     current_virtual_space()->set_next(new_entry);
1346   }
1347   set_current_virtual_space(new_entry);
1348   inc_reserved_words(new_entry->reserved_words());
1349   inc_committed_words(new_entry->committed_words());
1350   inc_virtual_space_count();
1351 #ifdef ASSERT
1352   new_entry->mangle();
1353 #endif
1354   LogTarget(Trace, gc, metaspace) lt;
1355   if (lt.is_enabled()) {
1356     LogStream ls(lt);
1357     VirtualSpaceNode* vsl = current_virtual_space();
1358     ResourceMark rm;
1359     vsl->print_on(&ls);
1360   }
1361 }
1362 
1363 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
1364                                       size_t min_words,
1365                                       size_t preferred_words) {
1366   size_t before = node->committed_words();
1367 
1368   bool result = node->expand_by(min_words, preferred_words);
1369 
1370   size_t after = node->committed_words();
1371 
1372   // after and before can be the same if the memory was pre-committed.
1373   assert(after >= before, "Inconsistency");
1374   inc_committed_words(after - before);
1375 
1376   return result;
1377 }
1378 
1379 bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {


1941 #endif
1942   chunk->container()->inc_container_count();
1943 
1944   slow_locked_verify();
1945   return chunk;
1946 }
1947 
1948 Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
1949   assert_lock_strong(SpaceManager::expand_lock());
1950   slow_locked_verify();
1951 
1952   // Take from the beginning of the list
1953   Metachunk* chunk = free_chunks_get(word_size);
1954   if (chunk == NULL) {
1955     return NULL;
1956   }
1957 
1958   assert((word_size <= chunk->word_size()) ||
1959          (list_index(chunk->word_size()) == HumongousIndex),
1960          "Non-humongous variable sized chunk");
1961   LogTarget(Debug, gc, metaspace, freelist) lt;
1962   if (lt.is_enabled()) {
1963     size_t list_count;
1964     if (list_index(word_size) < HumongousIndex) {
1965       ChunkList* list = find_free_chunks_list(word_size);
1966       list_count = list->count();
1967     } else {
1968       list_count = humongous_dictionary()->total_count();
1969     }
1970     LogStream ls(lt);
1971     ls.print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk " PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
1972              p2i(this), p2i(chunk), chunk->word_size(), list_count);
1973     ResourceMark rm;
1974     locked_print_free_chunks(&ls);
1975   }
1976 
1977   return chunk;
1978 }
1979 
1980 void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
1981   assert_lock_strong(SpaceManager::expand_lock());
1982   assert(chunk != NULL, "Expected chunk.");
1983   assert(chunk->container() != NULL, "Container should have been set.");
1984   assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
1985   index_bounds_check(index);
1986 
1987   // Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
1988   // matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
1989   // keeps tree node pointers in the chunk payload area which mangle will overwrite.
1990   NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
1991 
1992   if (index != HumongousIndex) {
1993     // Return non-humongous chunk to freelist.
1994     ChunkList* list = free_chunks(index);


2381 }
2382 
2383 SpaceManager::~SpaceManager() {
2384   // This call takes this->_lock, which can't be done while holding expand_lock()
2385   assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
2386          "sum_capacity_in_chunks_in_use() " SIZE_FORMAT
2387          " allocated_chunks_words() " SIZE_FORMAT,
2388          sum_capacity_in_chunks_in_use(), allocated_chunks_words());
2389 
2390   MutexLockerEx fcl(SpaceManager::expand_lock(),
2391                     Mutex::_no_safepoint_check_flag);
2392 
2393   chunk_manager()->slow_locked_verify();
2394 
2395   dec_total_from_size_metrics();
2396 
2397   Log(gc, metaspace, freelist) log;
2398   if (log.is_trace()) {
2399     log.trace("~SpaceManager(): " PTR_FORMAT, p2i(this));
2400     ResourceMark rm;
2401     LogStream ls(log.trace());
2402     locked_print_chunks_in_use_on(&ls);
2403     if (block_freelists() != NULL) {
2404       block_freelists()->print_on(&ls);
2405     }
2406   }
2407 
2408   // Add all the chunks in use by this space manager
2409   // to the global list of free chunks.
2410 
2411   // Follow each list of chunks-in-use and add them to the
2412   // free lists.  Each list is NULL terminated.
2413 
2414   for (ChunkIndex i = ZeroIndex; i <= HumongousIndex; i = next_chunk_index(i)) {
2415     Metachunk* chunks = chunks_in_use(i);
2416     chunk_manager()->return_chunk_list(i, chunks);
2417     set_chunks_in_use(i, NULL);
2418   }
2419 
2420   chunk_manager()->slow_locked_verify();
2421 
2422   if (_block_freelists != NULL) {
2423     delete _block_freelists;
2424   }


2461       set_current_chunk(new_chunk);
2462     }
2463     // Link at head.  The _current_chunk only points to a humongous chunk for
2464     // the null class loader metaspace (class and data virtual space managers).
2465     // New humongous chunks are linked at the head, so _current_chunk will not
2466     // point to the tail of the humongous chunks list.
2467     new_chunk->set_next(chunks_in_use(HumongousIndex));
2468     set_chunks_in_use(HumongousIndex, new_chunk);
2469 
2470     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
2471   }
2472 
2473   // Add to the running sum of capacity
2474   inc_size_metrics(new_chunk->word_size());
2475 
2476   assert(new_chunk->is_empty(), "Not ready for reuse");
2477   Log(gc, metaspace, freelist) log;
2478   if (log.is_trace()) {
2479     log.trace("SpaceManager::add_chunk: " SIZE_FORMAT, sum_count_in_chunks_in_use());
2480     ResourceMark rm;
2481     LogStream ls(log.trace());
2482     new_chunk->print_on(&ls);
2483     chunk_manager()->locked_print_free_chunks(&ls);
2484   }
2485 }
2486 
2487 void SpaceManager::retire_current_chunk() {
2488   if (current_chunk() != NULL) {
2489     size_t remaining_words = current_chunk()->free_word_size();
2490     if (remaining_words >= BlockFreelist::min_dictionary_size()) {
2491       MetaWord* ptr = current_chunk()->allocate(remaining_words);
2492       deallocate(ptr, remaining_words);
2493       inc_used_metrics(remaining_words);
2494     }
2495   }
2496 }
2497 
2498 Metachunk* SpaceManager::get_new_chunk(size_t chunk_word_size) {
2499   // Get a chunk from the chunk freelist
2500   Metachunk* next = chunk_manager()->chunk_freelist_allocate(chunk_word_size);
2501 
2502   if (next == NULL) {
2503     next = vs_list()->get_new_chunk(chunk_word_size,


3193                                               compressed_class_space_size()));
3194       }
3195     }
3196   }
3197 
3198   // If we got here then the metaspace got allocated.
3199   MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
3200 
3201 #if INCLUDE_CDS
3202   // Verify that we can use shared spaces.  Otherwise, turn off CDS.
3203   if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
3204     FileMapInfo::stop_sharing_and_unmap(
3205         "Could not allocate metaspace at a compatible address");
3206   }
3207 #endif
3208   set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
3209                                   UseSharedSpaces ? (address)cds_base : 0);
3210 
3211   initialize_class_space(metaspace_rs);
3212 
3213   LogTarget(Trace, gc, metaspace) lt;
3214   if (lt.is_enabled()) {
3215     ResourceMark rm;
3216     LogStream ls(lt);
3217     print_compressed_class_space(&ls, requested_addr);
3218   }
3219 }
3220 
3221 void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
3222   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
3223                p2i(Universe::narrow_klass_base()), Universe::narrow_klass_shift());
3224   if (_class_space_list != NULL) {
3225     address base = (address)_class_space_list->current_virtual_space()->bottom();
3226     st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
3227                  compressed_class_space_size(), p2i(base));
3228     if (requested_addr != 0) {
3229       st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
3230     }
3231     st->cr();
3232   }
3233 }
3234 
3235 // For UseCompressedClassPointers the class space is reserved above the top of
3236 // the Java heap.  The argument passed in is at the base of the compressed space.
3237 void Metaspace::initialize_class_space(ReservedSpace rs) {


3651   // Zero initialize.
3652   Copy::fill_to_words((HeapWord*)result, word_size, 0);
3653 
3654   return result;
3655 }
3656 
3657 size_t Metaspace::class_chunk_size(size_t word_size) {
3658   assert(using_class_space(), "Has to use class space");
3659   return class_vsm()->calc_chunk_size(word_size);
3660 }
3661 
3662 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
3663   tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
3664 
3665   // If result is still null, we are out of memory.
3666   Log(gc, metaspace, freelist) log;
3667   if (log.is_info()) {
3668     log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
3669              is_class_space_allocation(mdtype) ? "class" : "data", word_size);
3670     ResourceMark rm;
3671     LogStream ls(log.info());
3672     if (loader_data->metaspace_or_null() != NULL) {
3673       loader_data->dump(&ls);
3674     }
3675     MetaspaceAux::dump(&ls);
3676   }
3677 
3678   bool out_of_compressed_class_space = false;
3679   if (is_class_space_allocation(mdtype)) {
3680     Metaspace* metaspace = loader_data->metaspace_non_null();
3681     out_of_compressed_class_space =
3682       MetaspaceAux::committed_bytes(Metaspace::ClassType) +
3683       (metaspace->class_chunk_size(word_size) * BytesPerWord) >
3684       CompressedClassSpaceSize;
3685   }
3686 
3687   // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
3688   const char* space_string = out_of_compressed_class_space ?
3689     "Compressed class space" : "Metaspace";
3690 
3691   report_java_out_of_memory(space_string);
3692 
3693   if (JvmtiExport::should_post_resource_exhausted()) {
3694     JvmtiExport::post_resource_exhausted(
3695         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,

