/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

namespace metaspace {


VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                            \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize,   \
         "Too much committed memory. Committed: " SIZE_FORMAT     \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,               \
         MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count++;
}

void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count--;
}
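// The counter helpers above only assert that MetaspaceExpand_lock is held;
// they never acquire it themselves. A minimal caller sketch (hypothetical,
// for illustration only -- 'list' and 'words' are not names from this file):
//
//   {
//     MutexLockerEx ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//     list->inc_committed_words(words);   // lock held, assert_lock_strong passes
//   }                                     // lock released at end of scope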
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return vsn;
    }
  }
  return NULL;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(MetaspaceExpand_lock);

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}
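// The two constructors below correspond to the two kinds of lists: the
// word_size constructor builds the non-class list and reserves its first
// node itself, while the ReservedSpace constructor wraps the single
// pre-reserved compressed class space region and never grows a second node
// (see the assert in create_new_virtual_space()). A rough creation sketch
// (hypothetical, for illustration; 'word_size' and 'class_rs' stand in for
// whatever the caller computed):
//
//   VirtualSpaceList* vsl       = new VirtualSpaceList(word_size);  // non-class
//   VirtualSpaceList* class_vsl = new VirtualSpaceList(class_rs);   // class space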
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
    _virtual_space_list(NULL),
    _current_virtual_space(NULL),
    _is_class(false),
    _reserved_words(0),
    _committed_words(0),
    _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
    _virtual_space_list(NULL),
    _current_virtual_space(NULL),
    _is_class(true),
    _reserved_words(0),
    _committed_words(0),
    _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}
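// Expansion below is layered: expand_node_by() commits more memory within a
// single node and keeps _committed_words in sync, while expand_by() drives
// policy -- ask MetaspaceGC for permission, try the current node first, and
// only then retire it and reserve a fresh node. An illustrative call chain
// for an allocation that does not fit the current node (sketch, not verbatim):
//
//   get_new_chunk()
//     -> expand_by(min_word_size, preferred_word_size)
//          -> expand_node_by(current_virtual_space(), ...)  // commit more
//          -> retire_current_virtual_space()                // if that fails
//          -> create_new_virtual_space(grow_vs_words)       // then a new node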
bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
              class_or_not);
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
              class_or_not);
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
              class_or_not);
    return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
            class_or_not);
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

// Given a chunk, calculate the largest possible padding space which
// could be required when allocating it.
static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
  if (chunk_type != HumongousIndex) {
    // Normal, non-humongous chunks are allocated at chunk size
    // boundaries, so the largest padding space required would be that
    // minus the smallest chunk size.
    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
    return chunk_word_size - smallest_chunk_size;
  } else {
    // Humongous chunks are allocated at smallest-chunksize
    // boundaries, so there is no padding required.
    return 0;
  }
}
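// Worked example for the padding bound above (the concrete numbers are
// illustrative and depend on the configured chunk geometry): a non-humongous
// chunk of chunk_word_size S is placed at an S-aligned boundary, so in the
// worst case the gap in front of it must be bridged with padding chunks no
// smaller than the smallest chunk size. With a smallest chunk of 128 words
// and S = 8192 words, the bound is 8192 - 128 = 8064 words. Humongous chunks
// align only to the smallest chunk size, hence no padding is ever needed.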
Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  // We must have enough space for the requested size and any
  // additional required padding chunks.
  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());

  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity,       Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
               _virtual_space_count, p2i(_current_virtual_space));
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    st->cr();
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st, scale);
  }
}

void VirtualSpaceList::print_map(outputStream* st) const {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  unsigned i = 0;
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i++;
  }
}

} // namespace metaspace