/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
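
// Typical usage (mirrors CodeCache::print() near the end of this file):
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(cb, *heap) { if (cb->is_alive()) live.add(cb); }
//   if (!live.is_empty()) live.print("live");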

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
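
// Illustrative example (hypothetical values): with -XX:ReservedCodeCacheSize=240M,
// -XX:NonNMethodCodeHeapSize=8M, -XX:ProfiledCodeHeapSize=116M and
// -XX:NonProfiledCodeHeapSize=116M the sum is exactly 240M and both checks pass;
// raising any one of the three without lowering another exits the VM above.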

void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
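    // Illustrative example (hypothetical values): cache_size = 240M with current
    // sizes non_nmethod = 8M, profiled = 116M, non_profiled = 116M. If only
    // -XX:NonProfiledCodeHeapSize=200M was given, diff_size becomes
    // 240M - (8M + 116M + 200M) = -84M, and the deficit is taken out of the
    // (defaulted) profiled code heap below.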
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);
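
  // Rounding non_nmethod up and profiled down keeps those two heaps within the
  // reservation; the non-profiled heap simply receives whatever remains of the
  // reserved chunk (rest.last_part() below), so it needs no explicit alignment.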

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned) {
  if (os::can_execute_large_page_memory()) {
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, 8) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8);
  } else {
    return os::vm_page_size();
  }
}
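
// Note: the '8' above is the min_pages argument of os::page_size_for_region_*();
// it requests the largest page size for which ReservedCodeCacheSize still spans
// at least eight pages, so one huge page cannot dominate the whole cache.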

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}
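
// A sketch of the contract, inferred from the guarantee below (not a spec):
// the interpreter's BufferBlob is allocated at a generous upper bound because
// its final code size is unknown up front; once code generation finishes, the
// unused tail is returned to the CodeHeap and the blob is shrunk accordingly.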
void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain oops to the java heap.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
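
// Life cycle of the scavenge-root list, as implemented below: nmethods that
// contain scavengable oops are linked in via register_/add_scavenge_root_nmethod,
// unlinked again once they lose those oops (prune_scavenge_root_nmethods and the
// fix_relocations path above), and cross-checked in debug builds by the
// mark_scavenge_root_nmethods/verify_perm_nmethods pair.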

void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
    add_scavenge_root_nmethod(nm);
  }
}

void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
  nm->verify_scavenge_root_oops();
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the scavenge list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    Universe::heap()->verify_nmethod(nm);
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while(iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}
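
// Note the NOT_DEBUG/DEBUG_ONLY interplay in gc_epilogue() above: in product
// builds 'if (needs_cache_clean())' guards the whole loop, while in debug builds
// the loop always runs (so cm->verify() covers every alive method) and the
// needs_cache_clean() test moves inside, guarding only the inline cache cleanup.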

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}
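
// reverse_free_ratio() is what the sweeper consumes to scale its aggressiveness
// as a code heap fills up (see the fallback comment in CodeCache::allocate());
// the fuller the heap, the larger the returned ratio.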

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure on mismatch.
  // Instead, round the code cache expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent of the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works only correctly, if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }
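
  // The count returned below feeds flush_dependents_on(), which triggers a
  // VM_Deoptimize operation whenever at least one nmethod has been marked.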

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}
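
// Flushing dependents is a two-step protocol, visible in the flush_* functions
// below: first mark affected nmethods (at a safepoint or under CodeCache_lock),
// then deoptimize the dependent activations and make the marked methods not
// entrant, either via VM_Deoptimize or via the equivalent inlined sequence.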

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}
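
// The PRAGMA_FORMAT_NONLITERAL_IGNORED below covers report_codemem_full():
// msg1/msg2 are assembled at run time and passed to log_warning()/warning() in
// the format-string position; by construction they contain no '%' conversions.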

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      LogTarget(Debug, codecache) lt;
      if (lt.is_enabled()) {
        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

//---< BEGIN >--- CodeHeap State Analytics.
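
// The functions in this section fan out to CodeHeapState, once per allocable
// heap; 'granularity' is a numeric string selecting the aggregation step (cf.
// the "4096" passed by CompileBroker::print_heapinfo() above).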

void CodeCache::aggregate(outputStream *out, const char* granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---< END >--- CodeHeap State Analytics.