/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }
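
  // Note: print() divides by total_size, so callers check is_empty() first
  // to avoid division by zero (see CodeCache::print()).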
  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
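
// Compute the sizes of the three code heaps from the command-line flag settings,
// then reserve one contiguous region and split it among the heaps (see the
// memory layout sketch further below).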
void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
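    // For example, if only NonProfiledCodeHeapSize is set on the command line, the
    // profiled heap absorbs the difference between ReservedCodeCacheSize and the sum
    // of the three sizes (bounded below by min_size); only a remaining surplus or
    // deficit is folded into the non-nmethod heap.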
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}
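
// Order heaps by code blob type first and by address second, so that the
// sorted heap lists below have a stable, deterministic iteration order.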
int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
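  // After this adjustment, 'used' covers the blob header plus the code actually
  // emitted; deallocate_tail() below returns everything beyond it to the heap.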
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  UnloadingScope scope(is_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
  // 0 is reserved for new methods.
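  // The cycle value therefore advances 1 -> 2 -> 3 -> 1 and never returns to 0.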
  _unloading_cycle = (_unloading_cycle + 1) % 4;
  if (_unloading_cycle == 0) {
    _unloading_cycle = 1;
  }
}

CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
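 * The sweeper uses this value to become more aggressive as a code heap
 * fills up (see also the fallback comment in CodeCache::allocate()).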
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure. Instead,
  // round the code cache expansion size to the page size. In particular, Solaris
  // is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}
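
// Called during VM startup (from init_globals() in runtime/init.cpp).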
void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;

static void add_to_old_table(CompiledMethod* c) {
  if (old_compiled_method_table == NULL) {
    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, true);
  }
  old_compiled_method_table->push(c);
}

static void reset_old_method_table() {
  if (old_compiled_method_table != NULL) {
    delete old_compiled_method_table;
    old_compiled_method_table = NULL;
  }
}

// Remove this method when zombied or unloaded.
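// The method may be absent from the table if a later redefinition has already reset it.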
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_compiled_method_table != NULL) {
    int index = old_compiled_method_table->find(c);
    if (index != -1) {
      old_compiled_method_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_compiled_method_table != NULL) {
    length = old_compiled_method_table->length();
    for (int i = 0; i < length; i++) {
      CompiledMethod* cm = old_compiled_method_table->at(i);
      // Only walk alive nmethods, the dead ones will get removed by the sweeper.
      if (cm->is_alive()) {
        old_compiled_method_table->at(i)->metadata_do(f);
      }
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}

// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // Mark dependent AOT nmethods, which are only found via the class redefined.
  // TODO: add dependencies to aotCompiledMethod's metadata section so this isn't
  // needed.
  AOTLoader::mark_evol_dependent_methods(dependee);
}


// Walk compiled methods and mark dependent methods for deoptimization.
int CodeCache::mark_dependents_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  // Each redefinition creates a new set of nmethods that have references to "old" Methods
  // So delete old method table and create a new one.
  reset_old_method_table();

  int number_of_marked_CodeBlobs = 0;
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    // Walk all alive nmethods to check for old Methods.
    // This includes methods whose inline caches point to old methods, so
    // inline cache clearing is unnecessary.
    if (nm->has_evol_metadata()) {
      nm->mark_for_deoptimization();
      add_to_old_table(nm);
      number_of_marked_CodeBlobs++;
    }
  }

  // return total count of nmethods marked for deoptimization, if zero the caller
  // can skip deoptimization
  return number_of_marked_CodeBlobs;
}

void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
      if (nm->has_evol_metadata()) {
        add_to_old_table(nm);
      }
    }
  }
}

// Flushes compiled methods dependent on redefined classes that have already been
// marked for deoptimization.
void CodeCache::flush_evol_dependents() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // At least one nmethod has been marked for deoptimization

  Deoptimization::deoptimize_all_marked();
}
#endif // INCLUDE_JVMTI

// Mark methods for deopt (if safe or possible).
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->is_native_method()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    Deoptimization::deoptimize_all_marked();
  }
}

// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    Deoptimization::deoptimize_all_marked();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive())       { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie())      { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive())       { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie())      { nmethodZombie++; }
        if(nm->is_unloaded())    { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT
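
// Print a summary line per code heap; with Verbose enabled (non-product
// builds only), also print live/dead blob statistics and oop map usage.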
void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
1524 "disabled (interpreter mode)" : 1525 "disabled (not enough contiguous free space left)"); 1526 st->print_cr(" stopped_count=%d, restarted_count=%d", 1527 CompileBroker::get_total_compiler_stopped_count(), 1528 CompileBroker::get_total_compiler_restarted_count()); 1529 st->print_cr(" full_count=%d", full_count); 1530 } 1531 } 1532 1533 void CodeCache::print_codelist(outputStream* st) { 1534 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1535 1536 CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); 1537 while (iter.next()) { 1538 CompiledMethod* cm = iter.method(); 1539 ResourceMark rm; 1540 char* method_name = cm->method()->name_and_sig_as_C_string(); 1541 st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]", 1542 cm->compile_id(), cm->comp_level(), cm->get_state(), 1543 method_name, 1544 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end()); 1545 } 1546 } 1547 1548 void CodeCache::print_layout(outputStream* st) { 1549 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1550 ResourceMark rm; 1551 print_summary(st, true); 1552 } 1553 1554 void CodeCache::log_state(outputStream* st) { 1555 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1556 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1557 blob_count(), nmethod_count(), adapter_count(), 1558 unallocated_capacity()); 1559 } 1560 1561 //---< BEGIN >--- CodeHeap State Analytics. 1562 1563 void CodeCache::aggregate(outputStream *out, size_t granularity) { 1564 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1565 CodeHeapState::aggregate(out, (*heap), granularity); 1566 } 1567 } 1568 1569 void CodeCache::discard(outputStream *out) { 1570 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1571 CodeHeapState::discard(out, (*heap)); 1572 } 1573 } 1574 1575 void CodeCache::print_usedSpace(outputStream *out) { 1576 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1577 CodeHeapState::print_usedSpace(out, (*heap)); 1578 } 1579 } 1580 1581 void CodeCache::print_freeSpace(outputStream *out) { 1582 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1583 CodeHeapState::print_freeSpace(out, (*heap)); 1584 } 1585 } 1586 1587 void CodeCache::print_count(outputStream *out) { 1588 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1589 CodeHeapState::print_count(out, (*heap)); 1590 } 1591 } 1592 1593 void CodeCache::print_space(outputStream *out) { 1594 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1595 CodeHeapState::print_space(out, (*heap)); 1596 } 1597 } 1598 1599 void CodeCache::print_age(outputStream *out) { 1600 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1601 CodeHeapState::print_age(out, (*heap)); 1602 } 1603 } 1604 1605 void CodeCache::print_names(outputStream *out) { 1606 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1607 CodeHeapState::print_names(out, (*heap)); 1608 } 1609 } 1610 //---< END >--- CodeHeap State Analytics.