/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

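// Note: when none of the three code heap size flags is set on the command
// line, the logic below gives the non-nmethod heap its default size plus
// room for the compiler buffers, and then splits whatever remains of
// ReservedCodeCacheSize evenly between the profiled and non-profiled heaps.
// Illustrative numbers only: a 240M reserved code cache with a 10M
// non-nmethod heap leaves about 115M for each of the other two heaps.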
void CodeCache::initialize_heaps() {
  bool non_nmethod_set     = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set        = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set    = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size          = os::vm_page_size();
  size_t cache_size        = ReservedCodeCacheSize;
  size_t non_nmethod_size  = NonNMethodCodeHeapSize;
  size_t profiled_size     = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
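    // diff_size is the space still unaccounted for (negative if the
    // explicitly set heaps oversubscribe the cache). Illustrative example:
    // if the default sizes add up to the reserved cache size and the user
    // raises only NonProfiledCodeHeapSize by 20M, diff_size is -20M and the
    // profiled heap below shrinks by 20M (but never below the minimum size).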
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

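  // Note: only two of the three sizes are aligned explicitly. The non-nmethod
  // size is rounded up and the profiled size rounded down; the non-profiled
  // heap simply receives whatever is left of the reserved space (see
  // rest.last_part() below), absorbing the rounding slack.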
  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space   = rs.first_part(non_nmethod_size);
  ReservedSpace rest               = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
             os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
             os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
               os::page_size_for_region_aligned(size, 8)) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

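// Keep the heap arrays sorted: primarily by code blob type, ties broken by
// heap address, so that FOR_ALL_HEAPS iterates in a deterministic order.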
int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

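// Allocation strategy: try the code heap of the requested CodeBlobType first,
// expanding it on demand. If expansion fails and the code cache is segmented,
// fall back to another method heap (see the switch below). The original type
// is carried along in orig_code_blob_type; it bounds the fallback recursion
// and keeps error reporting pointed at the heap that failed first.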
/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap are free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL && !_heaps->is_empty()) {
    FOR_ALL_HEAPS(heap) {
      CodeBlob* result = (*heap)->find_blob_unsafe(start);
      if (result != NULL) {
        return result;
      }
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

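// The scavenge-root list below tracks nmethods that may contain oops into
// the young generation. It is only maintained for collectors that scan the
// list explicitly; G1 does not use it, which is why the functions in this
// section return early when UseG1GC is set.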
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
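// Run the closure (if given) over nmethods that are not on the scavenge-root
// list, i.e., those asserted to be non-scavengable, and verify the integrity
// of the list on the way.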
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

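// Note the macro trickery below: in product builds the loop body runs only
// if the cache needs cleaning, whereas in debug builds the loop always runs
// (so that every alive compiled method is verified) and only the inline
// cache cleanup itself is conditional.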
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while(iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

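// The reverse free ratio is used by the sweeper to become more aggressive as
// a code heap fills up: the less free space, the larger the value (e.g., 10%
// free yields a ratio of 10). See also the fallback comment in
// CodeCache::allocate.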
/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure;
  // instead, round the code cache to the page size. In particular, Solaris
  // is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

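// Class redefinition support: mark everything compiled from, or dependent
// on, the evolving class for deoptimization. Compiled methods that are not
// affected still get their inline caches cleared, because those caches may
// refer to a redefined Method*.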
#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

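// The flush_* functions below follow a common protocol: mark the affected
// compiled methods, deoptimize all activations that depend on them, and make
// the marked methods not entrant so that no new activations can enter the
// stale code.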
#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

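// Note: each heap reports being full at most once (tracked via full_count()),
// unless 'print' is set to force the message to be emitted again.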
// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

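// Print detailed statistics for all blobs in the code cache, including an
// nmethod size histogram with a bucket granularity of 512 bytes.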
void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

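// Print a summary of the code cache. In non-product builds with -XX:+Verbose,
// also break down live and dead blobs and, in WizardMode, oop map usage.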
void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_NMETHOD_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char *method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}