/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

namespace metaspace {


VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words + v;
}

void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                          \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT   \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
         MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}

void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count++;
}

void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count--;
}
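// Note on the traversal in purge() below: because nodes are unlinked while
// the list is being walked, it keeps an explicit prev/next pair instead of
// using VirtualSpaceListIterator. A minimal sketch of that pattern
// (illustrative only, not the exact code):
//
//   Node* prev = head;
//   Node* cur  = head;
//   while (cur != NULL) {
//     Node* next = cur->next();
//     if (removable(cur)) {
//       if (prev == cur) { head = next; } else { prev->set_next(next); }
//       delete cur;            // prev intentionally does not advance
//     } else {
//       prev = cur;            // only advance prev past surviving nodes
//     }
//     cur = next;
//   }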
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return vsn;
    }
  }
  return NULL;
}
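// Publication contract behind the unlocked walk in find_enclosing_space()
// above: writers issue OrderAccess::storestore() after fully initializing a
// new node and before linking it (see create_new_virtual_space()), and nodes
// are only unlinked and deleted inside a safepoint (see purge()). A
// concurrent reader therefore either observes a completely initialized node
// or does not observe that node at all.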
void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(MetaspaceExpand_lock);

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
    _virtual_space_list(NULL),
    _current_virtual_space(NULL),
    _is_class(false),
    _reserved_words(0),
    _committed_words(0),
    _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
    _virtual_space_list(NULL),
    _current_virtual_space(NULL),
    _is_class(true),
    _reserved_words(0),
    _committed_words(0),
    _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // Ensure lock-free iteration sees fully initialized node.
    OrderAccess::storestore();
    link_vs(new_entry);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}
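// Commit accounting in expand_node_by() below charges only the delta in the
// node's committed words to the list-wide counter. An illustrative run with
// made-up numbers:
//
//   size_t before = node->committed_words();      // e.g. 1024 words
//   node->expand_by(min_words, preferred_words);  // commits, say, 512 more
//   size_t after  = node->committed_words();      // now 1536 words
//   inc_committed_words(after - before);          // charge the 512-word delta
//
// If the node's memory was pre-committed, after == before and the counter
// is left unchanged.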
bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
                                       class_or_not);
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
                                       class_or_not);
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
                                       class_or_not);
    return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
                                     class_or_not);
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

// Given a chunk, calculate the largest possible padding space which
// could be required when allocating it.
static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
  if (chunk_type != HumongousIndex) {
    // Normal, non-humongous chunks are allocated at chunk size
    // boundaries, so the largest padding space required would be that
    // minus the smallest chunk size.
    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
    return chunk_word_size - smallest_chunk_size;
  } else {
    // Humongous chunks are allocated at smallest-chunksize
    // boundaries, so there is no padding required.
    return 0;
  }
}
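// Worked example for the padding bound above (illustrative, sizes in words;
// assumes the non-class geometry where SpecializedChunk is the smallest
// chunk size): a non-humongous request of size S is placed on an S-aligned
// boundary, so up to S - SpecializedChunk words of padding chunks may be
// needed in front of it. get_new_chunk() below therefore asks expand_by()
// for at least S + (S - SpecializedChunk) words, rounded up to the commit
// alignment. A humongous request needs no padding at all.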
Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  // We must have enough space for the requested size and any
  // additional required padding chunks.
  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());

  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity,       Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
               _virtual_space_count, p2i(_current_virtual_space));
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    st->cr();
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st, scale);
  }
}

void VirtualSpaceList::print_map(outputStream* st) const {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  unsigned i = 0;
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i++;
  }
}

} // namespace metaspace
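// Usage sketch (illustrative only; in HotSpot this list is driven by the
// metaspace initialization and allocation paths, not constructed ad hoc):
//
//   // A non-class list reserves and links its first node on construction.
//   VirtualSpaceList vsl(initial_word_size);
//   // Callers request chunks; the list commits more memory, or grows
//   // itself by a new node, when the current node cannot satisfy the request.
//   Metachunk* chunk = vsl.get_new_chunk(chunk_word_size, commit_granularity);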