1 /* 2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2018, SAP. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "utilities/debug.hpp" 28 #include "utilities/globalDefinitions.hpp" 29 #include "memory/metaspace/metachunk.hpp" 30 #include "memory/metaspace/occupancyMap.hpp" 31 #include "runtime/os.hpp" 32 33 namespace metaspace { 34 namespace internals { 35 36 OccupancyMap::OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) : 37 _reference_address(reference_address), _word_size(word_size), 38 _smallest_chunk_word_size(smallest_chunk_word_size) 39 { 40 assert(reference_address != NULL, "invalid reference address"); 41 assert(is_aligned(reference_address, smallest_chunk_word_size), 42 "Reference address not aligned to smallest chunk size."); 43 assert(is_aligned(word_size, smallest_chunk_word_size), 44 "Word_size shall be a multiple of the smallest chunk size."); 45 // Calculate bitmap size: one bit per smallest_chunk_word_size'd area. 46 size_t num_bits = word_size / smallest_chunk_word_size; 47 _map_size = (num_bits + 7) / 8; 48 assert(_map_size * 8 >= num_bits, "sanity"); 49 _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal); 50 _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal); 51 assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed."); 52 memset(_map[1], 0, _map_size); 53 memset(_map[0], 0, _map_size); 54 // Sanity test: the first respectively last possible chunk start address in 55 // the covered range shall map to the first and last bit in the bitmap. 56 assert(get_bitpos_for_address(reference_address) == 0, 57 "First chunk address in range must map to fist bit in bitmap."); 58 assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1, 59 "Last chunk address in range must map to last bit in bitmap."); 60 } 61 62 OccupancyMap::~OccupancyMap() { 63 os::free(_map[0]); 64 os::free(_map[1]); 65 } 66 67 #ifdef ASSERT 68 // Verify occupancy map for the address range [from, to). 
// We need to tell it the address range, because the memory the
// occupancy map is covering may not be fully committed yet.
void OccupancyMap::verify(MetaWord* from, MetaWord* to) {
  // Walk the range in smallest-chunk-size steps, cross-checking both bitmap
  // layers against the chunk headers actually present in memory.
  Metachunk* chunk = NULL;
  int nth_bit_for_chunk = 0;       // Position of the current bit within its chunk (for error messages).
  MetaWord* chunk_end = NULL;      // Expected end address of the current chunk.
  for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
    const unsigned pos = get_bitpos_for_address(p);
    // Check the chunk-starts-info:
    if (get_bit_at_position(pos, layer_chunk_start_map)) {
      // Chunk start marked in bitmap. The previous chunk (if any) must end
      // exactly here, and the header at p must look like a valid chunk.
      chunk = (Metachunk*) p;
      if (chunk_end != NULL) {
        assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
               "the next chunk to start at %p).", p, chunk_end);
      }
      assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
      // Non-humongous chunks are naturally aligned to their own size.
      if (chunk->get_chunk_type() != HumongousIndex) {
        guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
      }
      chunk_end = p + chunk->word_size();
      nth_bit_for_chunk = 0;
      assert(chunk_end <= to, "Chunk end overlaps test address range.");
    } else {
      // No chunk start marked in bitmap: we must still be inside the chunk
      // that started earlier in the walk.
      assert(chunk != NULL, "Chunk should start at start of address range.");
      assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
      nth_bit_for_chunk ++;
    }
    // Check the in-use-info: the in-use layer must agree with the chunk's
    // own free/in-use tag for every bit the chunk spans.
    const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
    if (in_use_bit) {
      assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
             chunk, nth_bit_for_chunk);
    } else {
      assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
             chunk, nth_bit_for_chunk);
    }
  }
}

// Verify that a given chunk is correctly accounted for in the bitmap.
111 void OccupancyMap::verify_for_chunk(Metachunk* chunk) { 112 assert(chunk_starts_at_address((MetaWord*) chunk), 113 "No chunk start marked in map for chunk %p.", chunk); 114 // For chunks larger than the minimal chunk size, no other chunk 115 // must start in its area. 116 if (chunk->word_size() > _smallest_chunk_word_size) { 117 assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size, 118 chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map), 119 "No chunk must start within another chunk."); 120 } 121 if (!chunk->is_tagged_free()) { 122 assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()), 123 "Chunk %p is in use but marked as free in map (%d %d).", 124 chunk, chunk->get_chunk_type(), chunk->get_origin()); 125 } else { 126 assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()), 127 "Chunk %p is free but marked as in-use in map (%d %d).", 128 chunk, chunk->get_chunk_type(), chunk->get_origin()); 129 } 130 } 131 132 #endif // ASSERT 133 134 } // namespace metaspace 135 } // namespace internals 136 137