/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
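// Illustrative usage sketch (not part of the original sources): CodeCache::print()
// near the end of this file accumulates per-blob statistics exactly like this,
// using the FOR_ALL_BLOBS macro defined below:
//
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(cb, *heap) {
//     if (cb->is_alive()) live.add(cb);
//   }
//   if (!live.is_empty()) live.print("live");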
// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
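// Worked example (illustrative, not from the original sources): with
// -XX:ReservedCodeCacheSize=100m and all three heap sizes set explicitly to
// -XX:NonNMethodCodeHeapSize=8m -XX:ProfiledCodeHeapSize=64m
// -XX:NonProfiledCodeHeapSize=64m, total_size = 8m + 64m + 64m = 136m > 100m,
// so check_heap_sizes() exits the VM with "Invalid code heap sizes".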
void CodeCache::initialize_heaps() {
  bool non_nmethod_set     = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set        = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set    = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size          = os::vm_page_size();
  size_t cache_size        = ReservedCodeCacheSize;
  size_t non_nmethod_size  = NonNMethodCodeHeapSize;
  size_t profiled_size     = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change the non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space   = rs.first_part(non_nmethod_size);
  ReservedSpace rest               = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
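// Worked example (illustrative, assuming no code heap sizes were set on the
// command line): with cache_size = 240M and non_nmethod_size at, say, 8M after
// the compiler buffers are added, remaining_size = 232M, so profiled_size and
// non_profiled_size each receive 116M. reserve_heap_memory() then carves the
// single reserved chunk bottom-up into the non-nmethod, profiled, and
// non-profiled parts sketched in the layout comment above.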
size_t CodeCache::page_size(bool aligned) {
  if (os::can_execute_large_page_memory()) {
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch (code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}
void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}
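// Illustrative note: first_blob()/next_blob() are exactly the iteration
// primitives that the FOR_ALL_BLOBS macro near the top of this file expands to:
//
//   for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb)) {
//     ...
//   }
//
// Both require the CodeCache_lock (or a safepoint), as asserted above.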
/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}
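// Fallback example (illustrative): if the profiled code heap is full and cannot
// be expanded, allocate(size, CodeBlobType::MethodProfiled) retries as
// allocate(size, CodeBlobType::MethodNonProfiled, CodeBlobType::MethodProfiled).
// If the non-profiled heap is exhausted as well, orig_code_blob_type prevents a
// retry back into the profiled heap; the CodeCache_lock is then dropped around
// CompileBroker::handle_full_code_cache() and NULL is returned.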
void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive);
  }

  // Now that all the unloaded nmethods are known, cleanup caches
  // before CLDG is purged.
  // This is another code cache walk but it is moved from gc_epilogue.
  // G1 does a parallel walk of the nmethods, so it cleans them up
  // as it goes and doesn't call this.
  do_unloading_nmethod_caches(unloading_occurred);
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain oops to the java heap.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    LogTarget(Trace, gc, nmethod) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      CompileTask::print(&ls, cur,
        is_live ? "scavenge root " : "dead scavenge root", /*short_form:*/ true);
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
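// Illustrative sketch: the scavenge-root list is a singly linked list threaded
// through nmethod::scavenge_root_link(), so unlinking an element during a walk
// needs its predecessor:
//
//   prev == NULL  ->  set_scavenge_root_nmethods(nm->scavenge_root_link());
//   prev != NULL  ->  prev->set_scavenge_root_link(nm->scavenge_root_link());
//
// which is what unlink_scavenge_root_nmethod() below implements.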
void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
    add_scavenge_root_nmethod(nm);
  }
}

void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
  nm->verify_scavenge_root_oops();
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the scavenge list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    Universe::heap()->verify_nmethod(nm);
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() { }

void CodeCache::gc_epilogue() {
  prune_scavenge_root_nmethods();
}


void CodeCache::do_unloading_nmethod_caches(bool class_unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  // Even if classes are not unloaded, there may have been some nmethods that are
  // unloaded because oops in them are no longer reachable.
  NOT_DEBUG(if (needs_cache_clean() || class_unloading_occurred)) {
    CompiledMethodIterator iter;
    while (iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean() || class_unloading_occurred)) {
        // Clean up both unloaded klasses from nmethods and unloaded nmethods
        // from inline caches.
        cm->unload_nmethod_caches(/*parallel*/false, class_unloading_occurred);
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}
int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}
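// Worked example (illustrative): if only 10% of a code heap is unallocated,
// reverse_free_ratio() returns 10. The sweeper compares this value against a
// threshold (see the fallback comment in allocate() above) to become more
// aggressive as free space shrinks.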
size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure. Instead,
  // round the expansion size up to the page size. In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent of the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}
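// Illustrative flow sketch: the flush_dependents_on* entry points below combine
// the helpers above. mark_for_deoptimization(...) selects the affected compiled
// methods; a VM_Deoptimize operation (or its inlined equivalent under HOTSWAP)
// then calls Deoptimization::deoptimize_dependents() to deoptimize running
// activations, and make_marked_nmethods_not_entrant() blocks future entries
// into the marked methods.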
// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However, we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However, we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}
// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      LogTarget(Debug, codecache) lt;
      if (lt.is_enabled()) {
        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}
void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive()) { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive()) { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie()) { nmethodZombie++; }
        if (nm->is_unloaded()) { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT
void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}
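// Example output sketch (hypothetical numbers, derived from the format strings
// in print_summary() below), as printed for one segmented code heap:
//
//   CodeHeap 'non-profiled nmethods': size=120032Kb used=4083Kb max_used=4083Kb free=115948Kb
//    bounds [0x..., 0x..., 0x...]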
1642 "disabled (interpreter mode)" : 1643 "disabled (not enough contiguous free space left)"); 1644 st->print_cr(" stopped_count=%d, restarted_count=%d", 1645 CompileBroker::get_total_compiler_stopped_count(), 1646 CompileBroker::get_total_compiler_restarted_count()); 1647 st->print_cr(" full_count=%d", full_count); 1648 } 1649 } 1650 1651 void CodeCache::print_codelist(outputStream* st) { 1652 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1653 1654 CompiledMethodIterator iter; 1655 while (iter.next_alive()) { 1656 CompiledMethod* cm = iter.method(); 1657 ResourceMark rm; 1658 char* method_name = cm->method()->name_and_sig_as_C_string(); 1659 st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]", 1660 cm->compile_id(), cm->comp_level(), cm->get_state(), 1661 method_name, 1662 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end()); 1663 } 1664 } 1665 1666 void CodeCache::print_layout(outputStream* st) { 1667 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1668 ResourceMark rm; 1669 print_summary(st, true); 1670 } 1671 1672 void CodeCache::log_state(outputStream* st) { 1673 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1674 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1675 blob_count(), nmethod_count(), adapter_count(), 1676 unallocated_capacity()); 1677 } 1678 1679 //---< BEGIN >--- CodeHeap State Analytics. 1680 1681 void CodeCache::aggregate(outputStream *out, const char* granularity) { 1682 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1683 CodeHeapState::aggregate(out, (*heap), granularity); 1684 } 1685 } 1686 1687 void CodeCache::discard(outputStream *out) { 1688 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1689 CodeHeapState::discard(out, (*heap)); 1690 } 1691 } 1692 1693 void CodeCache::print_usedSpace(outputStream *out) { 1694 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1695 CodeHeapState::print_usedSpace(out, (*heap)); 1696 } 1697 } 1698 1699 void CodeCache::print_freeSpace(outputStream *out) { 1700 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1701 CodeHeapState::print_freeSpace(out, (*heap)); 1702 } 1703 } 1704 1705 void CodeCache::print_count(outputStream *out) { 1706 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1707 CodeHeapState::print_count(out, (*heap)); 1708 } 1709 } 1710 1711 void CodeCache::print_space(outputStream *out) { 1712 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1713 CodeHeapState::print_space(out, (*heap)); 1714 } 1715 } 1716 1717 void CodeCache::print_age(outputStream *out) { 1718 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1719 CodeHeapState::print_age(out, (*heap)); 1720 } 1721 } 1722 1723 void CodeCache::print_names(outputStream *out) { 1724 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1725 CodeHeapState::print_names(out, (*heap)); 1726 } 1727 } 1728 //---< END >--- CodeHeap State Analytics.