/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
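    // Accumulate this blob's size components; nmethods additionally get a
    // breakdown into stub, oops, metadata, scopes-data and scopes-pcs sections.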
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}

void CodeCache::initialize_heaps() {
  bool non_nmethod_set     = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set        = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set    = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size          = os::vm_page_size();
  size_t cache_size        = ReservedCodeCacheSize;
  size_t non_nmethod_size  = NonNMethodCodeHeapSize;
  size_t profiled_size     = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
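    // diff_size is the space still unaccounted for: negative if the explicitly
    // set heaps overcommit the cache, positive if there is space left to distribute.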
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size    = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space   = rs.first_part(non_nmethod_size);
  ReservedSpace rest               = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
             os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
             os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
         MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
              os::page_size_for_region_aligned(size, 8)) :
         os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch (code_blob_type) {
    case CodeBlobType::NonNMethod:
      return "NonNMethodCodeHeapSize";
    case CodeBlobType::MethodNonProfiled:
      return "NonProfiledCodeHeapSize";
    case CodeBlobType::MethodProfiled:
      return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
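        // The fallback order below visits each heap at most once:
        // orig_code_blob_type records the first heap tried, and the
        // recursive allocate() call is only made for a type that differs
        // from it, so the recursion is bounded.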
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
          case CodeBlobType::NonNMethod:
            type = CodeBlobType::MethodNonProfiled;
            break;
          case CodeBlobType::MethodNonProfiled:
            type = CodeBlobType::MethodProfiled;
            break;
          case CodeBlobType::MethodProfiled:
            // Avoid loop if we already tried that code heap
            if (type == orig_code_blob_type) {
              type = CodeBlobType::MethodNonProfiled;
            }
            break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL && !_heaps->is_empty()) {
    FOR_ALL_HEAPS(heap) {
      CodeBlob* result = (*heap)->find_blob_unsafe(start);
      if (result != NULL) {
        return result;
      }
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          ((nmethod*)cb)->verify_scavenge_root_oops();
        }
#endif // ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
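      // (Whether the nmethod is visited more than once is up to the closure;
      // a marking closure may claim the nmethod so it is processed only once
      // per collection cycle.)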
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
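  // The protocol: mark every live nmethod that claims to be on the list, then
  // walk the actual list and clear the marks again. verify_perm_nmethods()
  // below asserts that no mark survives, i.e. that no nmethod claims list
  // membership without actually being linked.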
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list()) {
      nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list()) {
      call_f = false;  // don't show this one to the client
    }
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif // PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while (iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure. Instead,
  // round the code cache expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
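  // (Ordering matters: CodeCache::initialize() above has already set up the
  // regular CodeHeaps and the cache bounds before any AOT heaps are added.)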
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(InstanceKlass* ev_k) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)(msg1);
      log_warning(codecache)(msg2);
      warning(msg1);
      warning(msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive())       { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie())      { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive())       { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie())      { nmethodZombie++; }
        if (nm->is_unloaded())    { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_NMETHOD_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char *method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}