/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }
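  // Illustrative print() output (hypothetical numbers), following the format
  // string above:
  //   #980 live = 4580K (hdr 4%, loc 6%, code 54%, stub 9%, [oops 2%, metadata 6%, data 11%, pcs 8%])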
  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set     = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set        = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set    = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size          = os::vm_page_size();
  size_t cache_size        = ReservedCodeCacheSize;
  size_t non_nmethod_size  = NonNMethodCodeHeapSize;
  size_t profiled_size     = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);
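  // Illustrative failure case (hypothetical flags): -XX:ReservedCodeCacheSize=100M
  // together with -XX:NonNMethodCodeHeapSize=20M -XX:ProfiledCodeHeapSize=50M
  // -XX:NonProfiledCodeHeapSize=50M is rejected by the check above, because
  // 20M + 50M + 50M = 120M exceeds the 100M reservation.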
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
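    // Illustrative example (hypothetical sizes): if cache_size is 240M and an
    // explicitly set ProfiledCodeHeapSize pushes the sum of the three sizes to
    // 250M, diff_size below is -10M. The non-profiled heap absorbs the
    // shortfall unless that would shrink it to or below zero, in which case it
    // is clamped to min_size and the rest is taken from the non-nmethod heap.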
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size    = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space   = rs.first_part(non_nmethod_size);
  ReservedSpace rest               = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);
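  // For example (hypothetical sizes): with non_nmethod_size = 8M and
  // profiled_size = 116M out of a 240M reservation, the split above yields
  // [base, base+8M) for non-nmethods, [base+8M, base+124M) for profiled
  // nmethods, and [base+124M, base+240M) for non-profiled nmethods.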
  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
      os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
      os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
           os::page_size_for_region_aligned(size, 8)) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
      MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch (code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}
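// Note: since code_heap_compare orders heaps first by code_blob_type and then
// by address, the sorted heap lists group heaps of the same type together and
// give the FOR_ALL_* iterators a deterministic iteration order.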
void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion).
// It only depends on the _segmap to contain valid indices, which it will
// always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL && !_heaps->is_empty()) {
    FOR_ALL_HEAPS(heap) {
      CodeBlob* result = (*heap)->find_blob_unsafe(start);
      if (result != NULL) {
        return result;
      }
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          ((nmethod*)cb)->verify_scavenge_root_oops();
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list()) {
      nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list()) {
      call_f = false;  // don't show this one to the client
    }
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while (iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}
int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}
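// Worked example for reverse_free_ratio() (hypothetical sizes): with
// max_capacity = 128M and unallocated_capacity = 32M (25% free), the ratio is
// 4; it grows as the heap fills up, which is what lets the sweeper become
// increasingly aggressive (see the fallback comment in allocate() above).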
It is %f", result); 1006 return result; 1007 } 1008 1009 size_t CodeCache::bytes_allocated_in_freelists() { 1010 size_t allocated_bytes = 0; 1011 FOR_ALL_NMETHOD_HEAPS(heap) { 1012 allocated_bytes += (*heap)->allocated_in_freelist(); 1013 } 1014 return allocated_bytes; 1015 } 1016 1017 int CodeCache::allocated_segments() { 1018 int number_of_segments = 0; 1019 FOR_ALL_NMETHOD_HEAPS(heap) { 1020 number_of_segments += (*heap)->allocated_segments(); 1021 } 1022 return number_of_segments; 1023 } 1024 1025 size_t CodeCache::freelists_length() { 1026 size_t length = 0; 1027 FOR_ALL_NMETHOD_HEAPS(heap) { 1028 length += (*heap)->freelist_length(); 1029 } 1030 return length; 1031 } 1032 1033 void icache_init(); 1034 1035 void CodeCache::initialize() { 1036 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points"); 1037 #ifdef COMPILER2 1038 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops"); 1039 #endif 1040 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants"); 1041 // This was originally just a check of the alignment, causing failure, instead, round 1042 // the code cache to the page size. In particular, Solaris is moving to a larger 1043 // default page size. 1044 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size()); 1045 1046 if (SegmentedCodeCache) { 1047 // Use multiple code heaps 1048 initialize_heaps(); 1049 } else { 1050 // Use a single code heap 1051 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0); 1052 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0); 1053 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0); 1054 ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize); 1055 add_heap(rs, "CodeCache", CodeBlobType::All); 1056 } 1057 1058 // Initialize ICache flush mechanism 1059 // This service is needed for os::register_code_area 1060 icache_init(); 1061 1062 // Give OS a chance to register generated code area. 1063 // This is used on Windows 64 bit platforms to register 1064 // Structured Exception Handlers for our generated code. 1065 os::register_code_area((char*)low_bound(), (char*)high_bound()); 1066 } 1067 1068 void codeCache_init() { 1069 CodeCache::initialize(); 1070 // Load AOT libraries and add AOT code heaps. 
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC, occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive())       { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie())      { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive())       { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie())      { nmethodZombie++; }
        if (nm->is_unloaded())    { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_NMETHOD_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* cm = iter.method();
    ResourceMark rm;
    char* method_name = cm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 cm->compile_id(), cm->comp_level(), cm->get_state(),
                 method_name,
                 (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}