/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/chunkTree.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/constants.hpp"
#include "memory/metaspace/counter.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

#ifdef ASSERT
// Debug helper: assert that x is aligned to the commit granule size (in bytes).
// (The template parameter list was missing in the original text, which made the
// helper ill-formed; restored.)
// NOTE(review): callers pass both pointers (byte addresses) and word-sized
// counts. Checking a word count against commit_granule_bytes is stricter than
// intended - verify whether word counts should be checked against
// constants::commit_granule_words instead.
template <class T>
void check_is_aligned_to_commit_granule(T x) {
  assert(is_aligned(x, constants::commit_granule_bytes), "Unaligned pointer");
}
#endif

// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
// // Function will: // - check how many granules in that region are uncommitted; If all are committed, it // returns true immediately. // - check if committing those uncommitted granules would bring us over the commit limit // (GC threshold, MaxMetaspaceSize). If true, it returns false. // - commit the memory. // - mark the range as committed in the commit mask // // Returns true if success, false if it did hit a commit limit. bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) { DEBUG_ONLY(check_is_aligned_to_commit_granule(p);) DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) assert_lock_strong(MetaspaceExpand_lock); // First calculate how large the committed regions in this range are const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size); DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);) // By how much words we would increase commit charge // were we to commit the given address range completely. const size_t commit_increase_words = word_size - committed_words_in_range; if (commit_increase_words == 0) { return true; // Already fully committed, nothing to do. } // Before committing any more memory, check limits. if (_commit_limiter->possible_expansion_words() < commit_increase_words) { return false; } // Commit... if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) { vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace."); } log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.", commit_increase_words * BytesPerWord); // ... tell commit limiter... _commit_limiter->increase_committed(commit_increase_words); // ... update counters in containing vslist ... _total_committed_words_counter->increment_by(commit_increase_words); // ... and update the commit mask. 
_commit_mask.mark_range_as_committed(p, word_size); #ifdef ASSERT // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words // in both class and non-class vslist (outside gtests). if (_commit_limiter == CommitLimiter::globalLimiter()) { assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch"); } #endif return true; } // Given an address range, ensure it is committed. // // The range does not have to be aligned to granule size. However, the function will always commit // whole granules. // // Function will: // - check how many granules in that region are uncommitted; If all are committed, it // returns true immediately. // - check if committing those uncommitted granules would bring us over the commit limit // (GC threshold, MaxMetaspaceSize). If true, it returns false. // - commit the memory. // - mark the range as committed in the commit mask // // Returns true if success, false if it did hit a commit limit. bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) { assert_lock_strong(MetaspaceExpand_lock); assert(p != NULL && word_size > 0, "Sanity"); MetaWord* p_start = align_down(p, constants::commit_granule_bytes); MetaWord* p_end = align_up(p + word_size, constants::commit_granule_bytes); // Todo: simple for now. 
Make it more intelligent late return commit_range(p_start, p_end - p_start); } // Given an address range (which has to be aligned to commit granule size): // - uncommit it // - mark it as uncommitted in the commit mask bool VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) { DEBUG_ONLY(check_is_aligned_to_commit_granule(p);) DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) assert_lock_strong(MetaspaceExpand_lock); // First calculate how large the committed regions in this range are const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size); DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);) if (committed_words_in_range == 0) { return true; // Already fully uncommitted, nothing to do. } // Uncommit... if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) { // Note: this can actually happen, since uncommit may increase the number of mappings. fatal("Failed to uncommit metaspace."); } log_debug(gc, metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.", committed_words_in_range * BytesPerWord); // ... tell commit limiter... _commit_limiter->decrease_committed(committed_words_in_range); // ... and global counters... _total_committed_words_counter->decrement_by(committed_words_in_range); // ... and update the commit mask. _commit_mask.mark_range_as_uncommitted(p, word_size); #ifdef ASSERT // The commit boundary maintained in the CommitLimiter should be equal the sum of committed words // in both class and non-class vslist (outside gtests). 
if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch"); } #endif return true; } //// creation, destruction //// VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, CommitLimiter* limiter, SizeCounter* reserve_counter, SizeCounter* commit_counter) : _next(NULL), _base(rs.base()), _word_size(rs.size() / BytesPerWord), _used_words(0), _commit_mask(rs.base(), rs.size() / BytesPerWord), _chunk_tree_array(rs.base(), rs.size() / BytesPerWord), _commit_limiter(limiter), _total_reserved_words_counter(reserve_counter), _total_committed_words_counter(commit_counter) { // Update reserved counter in vslist _total_reserved_words_counter->increment_by(_word_size); } // Create a node of a given size VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size, CommitLimiter* limiter, SizeCounter* reserve_counter, SizeCounter* commit_counter) { DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);) ReservedSpace rs(word_size * BytesPerWord, constants::commit_granule_bytes, false, // TODO deal with large pages false); if (!rs.is_reserved()) { vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace"); } reserve_counter->increment_by(word_size * BytesPerWord); return create_node(rs, limiter, reserve_counter, commit_counter); } // Create a node over an existing space VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, CommitLimiter* limiter, SizeCounter* reserve_counter, SizeCounter* commit_counter) { reserve_counter->increment_by(rs.size() * BytesPerWord); return new VirtualSpaceNode(rs, limiter, reserve_counter, commit_counter); } VirtualSpaceNode::~VirtualSpaceNode() { _rs.release(); // Update counters in vslist _total_committed_words_counter->decrement_by(committed_words()); _total_reserved_words_counter->decrement_by(_word_size); } //// Chunk allocation, splitting, merging 
///// // Allocate a root chunk from this node. Will fail and return NULL // if the node is full. // Note: this just returns a chunk whose memory is reserved; no memory is committed yet. // Hence, before using this chunk, it must be committed. // Also, no limits are checked, since no committing takes place. Metachunk* VirtualSpaceNode::allocate_root_chunk() { assert_lock_strong(MetaspaceExpand_lock); assert_is_aligned(free_words, chklvl::MAX_CHUNK_WORD_SIZE); if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) { MetaWord* loc = _base + _used_words; _used_words += chklvl::MAX_CHUNK_WORD_SIZE; // Create a new chunk tree for that new root node. ChunkTree* tree = _chunk_tree_array.get_tree_by_address(loc); // Create a root chunk header and initialize it; Metachunk* c = tree->alloc_root_chunk_header(); // Wire it to the memory. c->set_base(loc); DEBUG_ONLY(c->verify(true);) return c; } return NULL; // Node is full. } Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) { assert_lock_strong(MetaspaceExpand_lock); // Get the tree associated with this chunk and let it handle the splitting ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base()); return tree->split(target_level, c, splinters); } Metachunk* VirtualSpaceNode::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) { assert_lock_strong(MetaspaceExpand_lock); // Get the tree associated with this chunk and let it handle the merging ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base()); return tree->merge(c, num_merged); } void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const { st->print("node @" PTR_FORMAT ": ", p2i(this)); st->print("reserved="); print_scaled_words(st, word_size(), scale); st->print(", committed="); print_scaled_words_and_percentage(st, committed_words(), word_size(), scale); st->print(", used="); print_scaled_words_and_percentage(st, used_words(), word_size(), scale); st->cr(); 
st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")", p2i(base()), p2i(base() + used_words()), p2i(base() + word_size())); st->print("Tree/Chunk footprint: "); print_scaled_words(st, _chunk_tree_array.memory_footprint_words(), scale); st->cr(); } #ifdef ASSERT // Verify counters and basic structure. Slow mode: verify all chunks in depth void VirtualSpaceNode::verify(bool slow) const { assert_lock_strong(MetaspaceExpand_lock); assert(base() != NULL, "Invalid base"); assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE); assert(used_words() < word_size(), "Sanity"); // Since we only ever hand out root chunks from a vsnode, top should always be aligned // to root chunk size. assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE); _commit_mask.verify(slow); _chunk_tree_array.verify(slow); } // Returns sum of committed space, in words. size_t VirtualSpaceNode::committed_words() const { return _commit_mask.get_committed_size(); } #endif } // namespace metaspace