/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size * 100 / total_size,
                  relocation_size * 100 / total_size,
                  code_size * 100 / total_size,
                  stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size = os::vm_page_size();
  size_t cache_size = ReservedCodeCacheSize;
  size_t non_nmethod_size = NonNMethodCodeHeapSize;
  size_t profiled_size = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
                   (profiled_set ? profiled_size : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and split the remaining
      // size evenly between the profiled and non-profiled code heaps
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt the non-profiled and profiled
    // code heap sizes, and only then change the non-nmethod code heap size if still necessary.
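    // A worked example with hypothetical (non-default) sizes: given
    // -XX:ReservedCodeCacheSize=240M and only -XX:ProfiledCodeHeapSize=120M set
    // on the command line, diff_size below is 240M minus the sum of the default
    // non-nmethod size, 120M, and the default non-profiled size. Since only
    // profiled_set is true, the non-profiled heap absorbs that difference; any
    // residual would then be folded into the non-nmethod heap.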
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size = align_down(profiled_size, alignment);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space = rs.first_part(non_nmethod_size);
  ReservedSpace rest = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

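  // Note: add_heap() below silently skips any heap for which heap_available()
  // returns false, so a sub-space reserved for an unused heap (sized down to 0
  // by the adjustments above) is simply never registered.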
  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned) {
  if (os::can_execute_large_page_memory()) {
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
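  // (The sweeper reclaims space held by zombie nmethods; nudging it here
  // makes it more likely that the allocation below succeeds without having
  // to expand the heap.)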
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
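  // For illustration (numbers are hypothetical): if the interpreter's
  // BufferBlob was created with 256K of code space of which only 200K was
  // actually emitted, the caller passes used == 200K; adding the aligned
  // header size below lets deallocate_tail() keep the header plus the used
  // code and return the rest of the blob to the heap.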
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
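// Called during GC class unloading: an alive compiled method that embeds an
// oop not reported live by 'is_alive' is transitioned to the unloaded state,
// after which the sweeper can eventually reclaim it.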
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive);
  }

  // Now that all the unloaded nmethods are known, cleanup caches
  // before CLDG is purged.
  // This is another code cache walk but it is moved from gc_epilogue.
  // G1 does a parallel walk of the nmethods so cleans them up
  // as it goes and doesn't call this.
  do_unloading_nmethod_caches(unloading_occurred);
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain oops to the java heap.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    LogTarget(Trace, gc, nmethod) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      CompileTask::print(&ls, cur,
        is_live ? "scavenge root " : "dead scavenge root", /*short_form:*/ true);
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
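  // (Debug builds only: verify_perm_nmethods() asserts that no nmethod still
  // carries the temporary mark set by mark_scavenge_root_nmethods() above.)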
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
    add_scavenge_root_nmethod(nm);
  }
}

void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
  nm->verify_scavenge_root_oops();
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the scavenge list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false; // don't show this one to the client
    Universe::heap()->verify_nmethod(nm);
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* free_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(free_list_head);
      if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, free_list_head) == free_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

void CodeCache::gc_prologue() { }

void CodeCache::gc_epilogue() {
  prune_scavenge_root_nmethods();
}


void CodeCache::do_unloading_nmethod_caches(bool class_unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  // Even if classes are not unloaded, there may have been some nmethods that are
  // unloaded because oops in them are no longer reachable.
  NOT_DEBUG(if (needs_cache_clean() || class_unloading_occurred)) {
    CompiledMethodIterator iter;
    while(iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean() || class_unloading_occurred)) {
        // Clean up both unloaded klasses from nmethods and unloaded nmethods
        // from inline caches.
        cm->unload_nmethod_caches(/*parallel*/false, class_unloading_occurred);
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0.
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure on
  // misalignment; instead, round the expansion size up to the page size.
  // In particular, Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound, target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
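    // (Buffering first keeps us from holding the CodeCache_lock while
    // blocking on the tty lock.)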
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      LogTarget(Debug, codecache) lt;
      if (lt.is_enabled()) {
        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr(" #blobs = %d", number_of_blobs);
    tty->print_cr(" code size = %d", code_size);
    tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
    tty->print_cr(" map size = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
    st->print_cr(" stopped_count=%d, restarted_count=%d",
                 CompileBroker::get_total_compiler_stopped_count(),
                 CompileBroker::get_total_compiler_restarted_count());
    st->print_cr(" full_count=%d", full_count);
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

//---< BEGIN >--- CodeHeap State Analytics.
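// The functions below simply fan out over all allocable CodeHeaps and
// delegate to CodeHeapState, which aggregates usage data at the requested
// granularity and prints the individual views (see codeHeapState.cpp).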

void CodeCache::aggregate(outputStream *out, const char* granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---< END >--- CodeHeap State Analytics.