/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
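    // For example (illustrative): if the explicitly set sizes plus the defaults of
    // the remaining heaps exceed ReservedCodeCacheSize, diff_size below is negative
    // and the default-sized heaps are shrunk to absorb the difference (but never
    // below min_size); any remainder is pushed into the non-nmethod heap. If the
    // sum falls short of ReservedCodeCacheSize, the slack is added instead.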
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
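  // Waking the sweeper gives it a chance to reclaim space held by zombie and
  // otherwise unused nmethods, improving the odds that this (or a later)
  // allocation succeeds.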
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
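  // (align_code_offset() rounds the header size up so that the code following
  // the header stays properly aligned; that padding must also count as used
  // space here.)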
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  UnloadingScope scope(is_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
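// By the time this runs, the handshake guarantees that no thread still holds
// a reference to an entry on the purge list, so plain deletion is safe.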
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  if (_unloading_cycle == 1) {
    _unloading_cycle = 2;
  } else {
    _unloading_cycle = 1;
  }
}

CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
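 * A higher value thus means a fuller code heap. The sweeper uses this to
 * decide how aggressively to clean (see the note in CodeCache::allocate()
 * about forcing stack scanning when less than 10% of a code heap is free).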
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure; instead,
  // round the code cache expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;

static void add_to_old_table(CompiledMethod* c) {
  if (old_compiled_method_table == NULL) {
    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, true);
  }
  old_compiled_method_table->push(c);
}

static void reset_old_method_table() {
  if (old_compiled_method_table != NULL) {
    delete old_compiled_method_table;
    old_compiled_method_table = NULL;
  }
}

// Remove this method when zombied or unloaded.
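// Caller must hold the CodeCache_lock (asserted below); the method may
// legitimately be absent from the table, in which case this is a no-op.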
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_compiled_method_table != NULL) {
    int index = old_compiled_method_table->find(c);
    if (index != -1) {
      old_compiled_method_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_compiled_method_table != NULL) {
    length = old_compiled_method_table->length();
    for (int i = 0; i < length; i++) {
      CompiledMethod* cm = old_compiled_method_table->at(i);
      // Only walk alive nmethods, the dead ones will get removed by the sweeper.
      if (cm->is_alive()) {
        old_compiled_method_table->at(i)->metadata_do(f);
      }
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}

// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // Mark dependent AOT nmethods, which are only found via the class redefined.
  // TODO: add dependencies to aotCompiledMethod's metadata section so this isn't
  // needed.
  AOTLoader::mark_evol_dependent_methods(dependee);
}


// Walk compiled methods and mark dependent methods for deoptimization.
int CodeCache::mark_dependents_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  // Each redefinition creates a new set of nmethods that have references to "old" Methods,
  // so delete the old method table and create a new one.
  reset_old_method_table();

  int number_of_marked_CodeBlobs = 0;
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    // Walk all alive nmethods to check for old Methods.
    // This includes methods whose inline caches point to old methods, so
    // inline cache clearing is unnecessary.
    if (nm->has_evol_metadata()) {
      nm->mark_for_deoptimization();
      add_to_old_table(nm);
      number_of_marked_CodeBlobs++;
    }
  }

  // return total count of nmethods marked for deoptimization; if zero the caller
  // can skip deoptimization
  return number_of_marked_CodeBlobs;
}

void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
      if (nm->has_evol_metadata()) {
        add_to_old_table(nm);
      }
    }
  }
}

// Flushes compiled methods dependent on redefined classes that have already been
// marked for deoptimization.
void CodeCache::flush_evol_dependents() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.
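
  // The actual marking was done earlier, by mark_dependents_for_evol_deoptimization()
  // or mark_all_nmethods_for_evol_deoptimization().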

  // At least one nmethod has been marked for deoptimization

  Deoptimization::deoptimize_all_marked();
}
#endif // INCLUDE_JVMTI

// Deoptimize all (or almost all) methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    // Methods that are not installed are unsafe to mark for deopt and are
    // normally never deoptimized anyway.
    // A not_entrant method may become a zombie at any time, since we don't
    // know on which side of the last safepoint it became not_entrant
    // (the state must be in_use).
    // Native methods are unsafe to mark for deopt and are normally never
    // deoptimized either.
    if (!nm->method()->is_method_handle_intrinsic() &&
        !nm->is_not_installed() &&
        nm->is_in_use() &&
        !nm->is_native_method()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    // only_alive_and_not_unloading also returns not_entrant nmethods.
    // A not_entrant method can become a zombie at any time if it was made
    // not_entrant before the previous safepoint/handshake. We therefore
    // exclude the not_entrant and zombie states by checking is_in_use().
    if (nm->is_marked_for_deoptimization() && nm->is_in_use()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    Deoptimization::deoptimize_all_marked();
  }
}

// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    Deoptimization::deoptimize_all_marked();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
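// Typically reached via CompileBroker::handle_full_code_cache() after a failed
// allocation (see CodeCache::allocate()).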
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
    st->print_cr(" stopped_count=%d, restarted_count=%d",
                 CompileBroker::get_total_compiler_stopped_count(),
                 CompileBroker::get_total_compiler_restarted_count());
    st->print_cr(" full_count=%d", full_count);
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

//---< BEGIN >--- CodeHeap State Analytics.

void CodeCache::aggregate(outputStream *out, const char* granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---< END >--- CodeHeap State Analytics.