/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
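
// A minimal usage sketch of the iteration macros above (this is the pattern
// used by CodeCache::blobs_do() further down): visit every blob in every
// code heap.
//
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       // ... inspect cb ...
//     }
//   }
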
address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set     = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set        = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set    = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size          = os::vm_page_size();
  size_t cache_size        = ReservedCodeCacheSize;
  size_t non_nmethod_size  = NonNMethodCodeHeapSize;
  size_t profiled_size     = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
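
  // Worked example of the default split below (illustrative numbers, not the
  // actual flag defaults): with cache_size = 128M and non_nmethod_size = 8M
  // after adding the compiler buffers, the remaining 120M is halved, giving
  // profiled_size = non_profiled_size = 60M.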
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space   = rs.first_part(non_nmethod_size);
  ReservedSpace rest               = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
      os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
      os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
           os::page_size_for_region_aligned(size, 8)) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
      MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
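
// Summary of heap_available() above:
//   !SegmentedCodeCache               -> single 'All' heap
//   interpreter-only                  -> non-nmethod heap only
//   tiered beyond CompLevel_simple    -> non-nmethod, profiled and non-profiled heaps
//   otherwise (no tiered profiling)   -> non-nmethod and non-profiled heaps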
const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}
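
// Note: each heap list is kept sorted by code_blob_type via code_heap_compare
// (see the insert_sorted calls below). This keeps the linear scans in the
// get_code_heap() lookups deterministic: the first heap accepting a given
// CodeBlobType is always the same one.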
void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}
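
// Illustrative numbers for the reservation above (not the flag defaults):
// with InitialCodeCacheSize = 2M and rs.size() = 60M, only the first 2M
// (page-aligned) is committed up front; the heap then grows on demand in
// CodeCacheExpansionSize steps from CodeCache::allocate() until the
// reserved 60M is exhausted.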
CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}
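
// A minimal caller sketch (hypothetical; the real pattern lives in
// BufferBlob::create() in codeBlob.cpp). The CodeCache_lock must be held
// across both the raw allocation and the in-place construction, otherwise
// other threads could observe a garbage blob:
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
//   if (p != NULL) {
//     blob = new (p) SomeBlobSubclass(...);  // hypothetical subclass
//   }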
void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}
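
// The NMethodIterator / CompiledMethodIterator idiom used above and in the
// rest of this file walks nmethods across all nmethod code heaps:
//
//   NMethodIterator iter;
//   while (iter.next_alive()) {  // next() would also visit zombie/unloaded ones
//     nmethod* nm = iter.method();
//     // ... process nm ...
//   }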
// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain oops to the java heap.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
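
// The scavenge-root nmethods form a singly linked list threaded through each
// nmethod (scavenge_root_link()), with _scavenge_root_nmethods as the head.
// Unlinking therefore needs the predecessor:
//
//   prev -> cur -> next   becomes   prev -> next
//
// which is what unlink_scavenge_root_nmethod() below implements.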
void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
    add_scavenge_root_nmethod(nm);
  }
}

void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
  nm->verify_scavenge_root_oops();
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the scavenge list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}
// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    Universe::heap()->verify_nmethod(nm);
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while(iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}
address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}
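
// For example, a heap with only 10% of its capacity unallocated yields a
// reverse_free_ratio of 10. The sweeper uses this value to become more
// aggressive as a code heap fills up (see the fallback comment in
// allocate() above).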
size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a startup failure.
  // Instead, round the expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}
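
// Broadly, the distinction above: clear_inline_caches() resets the inline
// caches of all alive compiled methods back to their initial state, while
// cleanup_inline_caches() asks each alive nmethod to clean out all of its
// inline caches (clean_all=true), e.g. entries referring to code that is
// going away.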
// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}
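
// The overall flow for dependency-triggered deoptimization, used above and
// in the variants below:
//
//   1. mark_for_deoptimization(...)             - flag affected nmethods
//   2. Deoptimization::deoptimize_dependents()  - deoptimize live activations
//   3. make_marked_nmethods_not_entrant()       - block future entries
//
// flush_dependents_on() delegates steps 2 and 3 to the VM_Deoptimize
// VM operation, which runs at a safepoint.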
#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP
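
// Illustrative warning text produced above for the profiled heap (the heap
// names are the ones registered in initialize_heaps()):
//
//   CodeHeap 'profiled nmethods' is full. Compiler has been disabled.
//   Try increasing the code heap size using -XX:ProfiledCodeHeapSize=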
void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}