/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
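    // Illustrative example (hypothetical flag values, not VM defaults): with
    // -XX:ReservedCodeCacheSize=100M and only -XX:ProfiledCodeHeapSize=30M given,
    // diff_size below measures how far the three current sizes miss the 100M budget.
    // The non-profiled heap absorbs the difference first; the non-nmethod heap is
    // only resized if a remainder is left because a heap hit its minimum size.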
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned) {
  if (os::can_execute_large_page_memory()) {
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
  } else {
    return os::vm_page_size();
  }
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
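  // (align_code_offset() pads the header to the next CodeEntryAlignment boundary,
  // matching the padding applied when the blob was laid out, so 'used' covers the
  // header plus its alignment pad before the unused tail is returned to the heap.)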
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
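// This is driven by the GC at a safepoint, after marking has determined which oops
// are still reachable; nmethods that hold only unreachable oops become unloaded.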
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain oops to the java heap.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
    add_scavenge_root_nmethod(nm);
  }
}

void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
  nm->verify_scavenge_root_oops();
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the scavenge list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    Universe::heap()->verify_nmethod(nm);
    if (call_f)  f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while(iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure; instead, round
  // the code cache to the page size. In particular, Solaris is moving to a larger
  // default page size.
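  // For illustration (hypothetical values): with 4K pages, -XX:CodeCacheExpansionSize=62K
  // would be rounded up to 64K here.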
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
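    // dependentCheckTime accumulates over the whole run; its total is reported
    // via CodeCache::print() in non-product builds.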
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.
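
  // KlassDepChange captures the dependency-relevant context of the newly loaded class;
  // mark_for_deoptimization() below streams over that context (see code/dependencies.hpp).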
  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}