/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size * 100 / total_size,
                  relocation_size * 100 / total_size,
                  code_size * 100 / total_size,
                  stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size = os::vm_page_size();
  size_t cache_size = ReservedCodeCacheSize;
  size_t non_nmethod_size = NonNMethodCodeHeapSize;
  size_t profiled_size = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
                   (profiled_set ? profiled_size : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
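    // Illustrative example (not from the original source): if only ProfiledCodeHeapSize
    // is given on the command line, diff_size below is the surplus (or deficit) left over
    // after honoring that explicit size; it is applied to the non-profiled heap first and
    // only spills into the non-nmethod heap if the adjusted heap would fall to its minimum.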
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space = rs.first_part(non_nmethod_size);
  ReservedSpace rest = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
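  // The sweeper can then reclaim space (e.g., by flushing zombie nmethods) for
  // future allocations; it does not free anything synchronously for this request.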
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
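  // After the adjustment below, 'used' covers the aligned header plus the code actually
  // emitted; deallocate_tail() then returns everything beyond it to the heap's free
  // list, and adjust_size() shrinks the blob's bookkeeping to match.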
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}
// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  UnloadingScope scope(is_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  while(iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
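// The CAS push loop in release_exception_cache() above makes the purge list safe
// to build concurrently; by the time this runs (after the handshake), no thread
// can still be reading the unlinked entries, so a plain walk-and-delete suffices.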
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  if (_unloading_cycle == 1) {
    _unloading_cycle = 2;
  } else {
    _unloading_cycle = 1;
  }
}

CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
  IsUnloadingBehaviour::set_current(NULL);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
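 * The value grows as the heap fills up (10% free yields 10, 1% free yields 100);
 * the sweeper uses it to become more aggressive as free space shrinks (see the
 * fallback allocation comment in CodeCache::allocate() and NMethodSweeper).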
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused initialization to fail;
  // instead, round the expansion size up to the page size. In particular, Solaris
  // is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
    }
  }

  // Mark dependent AOT nmethods, which are only found via the class redefined.
  AOTLoader::mark_evol_dependent_methods(dependee);
}

// Walk compiled methods and mark dependent methods for deoptimization.
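// This is the second pass over a class redefinition: mark_for_evol_deoptimization()
// above has already marked the redefined class's own methods (the "previous pass"
// referenced below), while this pass catches compiled methods that depend on them.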
int CodeCache::mark_dependents_for_evol_deoptimization() {
  int number_of_marked_CodeBlobs = 0;
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; count it here.
      // Also counts AOT compiled methods, already marked.
      number_of_marked_CodeBlobs++;
    } else if (nm->is_evol_dependent()) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  // return total count of nmethods marked for deoptimization, if zero the caller
  // can skip deoptimization
  return number_of_marked_CodeBlobs;
}

// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while(iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on redefined classes, that have already been
// marked for deoptimization.
void CodeCache::flush_evol_dependents() {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // At least one nmethod has been marked for deoptimization

  // All this already happens inside a VM_Operation, so we'll do all the work here.
  // Stuff copied from VM_Deoptimize and modified slightly.

  // We do not want any GCs to happen while we are in the middle of this VM operation
  ResourceMark rm;
  DeoptimizationMarker dm;

  // Deoptimize all activations depending on marked nmethods
  Deoptimization::deoptimize_dependents();

  // Make the dependent methods not entrant
  make_marked_nmethods_not_entrant();
}

// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
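    // (The summary is rendered under CodeCache_lock first and only then printed
    // under the tty lock, so neither lock is held while the other is taken.)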
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter(NMethodIterator::all_blobs);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
= %d", number_of_blobs); 1461 tty->print_cr(" code size = %d", code_size); 1462 tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 1463 tty->print_cr(" map size = %d", map_size); 1464 } 1465 1466 #endif // !PRODUCT 1467 } 1468 1469 void CodeCache::print_summary(outputStream* st, bool detailed) { 1470 int full_count = 0; 1471 FOR_ALL_HEAPS(heap_iterator) { 1472 CodeHeap* heap = (*heap_iterator); 1473 size_t total = (heap->high_boundary() - heap->low_boundary()); 1474 if (_heaps->length() >= 1) { 1475 st->print("%s:", heap->name()); 1476 } else { 1477 st->print("CodeCache:"); 1478 } 1479 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT 1480 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", 1481 total/K, (total - heap->unallocated_capacity())/K, 1482 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K); 1483 1484 if (detailed) { 1485 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", 1486 p2i(heap->low_boundary()), 1487 p2i(heap->high()), 1488 p2i(heap->high_boundary())); 1489 1490 full_count += get_codemem_full_count(heap->code_blob_type()); 1491 } 1492 } 1493 1494 if (detailed) { 1495 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT 1496 " adapters=" UINT32_FORMAT, 1497 blob_count(), nmethod_count(), adapter_count()); 1498 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ? 1499 "enabled" : Arguments::mode() == Arguments::_int ? 1500 "disabled (interpreter mode)" : 1501 "disabled (not enough contiguous free space left)"); 1502 st->print_cr(" stopped_count=%d, restarted_count=%d", 1503 CompileBroker::get_total_compiler_stopped_count(), 1504 CompileBroker::get_total_compiler_restarted_count()); 1505 st->print_cr(" full_count=%d", full_count); 1506 } 1507 } 1508 1509 void CodeCache::print_codelist(outputStream* st) { 1510 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1511 1512 CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); 1513 while (iter.next()) { 1514 CompiledMethod* cm = iter.method(); 1515 ResourceMark rm; 1516 char* method_name = cm->method()->name_and_sig_as_C_string(); 1517 st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]", 1518 cm->compile_id(), cm->comp_level(), cm->get_state(), 1519 method_name, 1520 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end()); 1521 } 1522 } 1523 1524 void CodeCache::print_layout(outputStream* st) { 1525 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1526 ResourceMark rm; 1527 print_summary(st, true); 1528 } 1529 1530 void CodeCache::log_state(outputStream* st) { 1531 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1532 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1533 blob_count(), nmethod_count(), adapter_count(), 1534 unallocated_capacity()); 1535 } 1536 1537 //---< BEGIN >--- CodeHeap State Analytics. 

void CodeCache::aggregate(outputStream *out, const char* granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---< END >--- CodeHeap State Analytics.