/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size * 100 / total_size,
                  relocation_size * 100 / total_size,
                  code_size * 100 / total_size,
                  stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
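
// Illustrative use of the iteration macros (mirrors CodeCache::print() below):
//
//   CodeBlob_sizes live;
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       if (cb->is_alive()) live.add(cb);
//     }
//   }
//   if (!live.is_empty()) live.print("live");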

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (%zuK) + ProfiledCodeHeapSize (%zuK) + NonProfiledCodeHeapSize (%zuK) = %zuK",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (%zuK).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (%zuK).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
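
// Example (hypothetical values): with -XX:ReservedCodeCacheSize=240M and only
// -XX:NonNMethodCodeHeapSize=16M given on the command line, the two unset sizes
// are passed in as the minimum (one page), so only the upper bound is checked;
// the exact-equality check applies only when all three sizes were set (all_set).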

void CodeCache::initialize_heaps() {
  bool non_nmethod_set   = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set      = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set  = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size        = os::vm_page_size();
  size_t cache_size      = ReservedCodeCacheSize;
  size_t non_nmethod_size = NonNMethodCodeHeapSize;
  size_t profiled_size   = ProfiledCodeHeapSize;
  size_t non_profiled_size = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
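  // Worked example (hypothetical numbers): with cache_size = 240M and a default
  // non_nmethod_size of roughly 6M plus compiler buffers, the remainder is split
  // evenly below, about 117M for profiled and 117M for non-profiled nmethods.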
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space   = rs.first_part(non_nmethod_size);
  ReservedSpace rest               = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
      os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
      os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}
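
// For example (hypothetical platform values): with 2M large pages and a 64K
// allocation granularity, heaps are aligned to 2M; without large page support
// the alignment falls back to MAX2(vm_page_size, allocation granularity).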

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
           os::page_size_for_region_aligned(size, 8)) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
      MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
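
// Summary of the cases above (illustrative):
//   !SegmentedCodeCache                -> { All }
//   interpreter-only                   -> { NonNMethod }
//   tiered, above CompLevel_simple     -> { NonNMethod, MethodProfiled, MethodNonProfiled }
//   otherwise                          -> { NonNMethod, MethodNonProfiled }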

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");
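
  // Allocation loop (sketch): try to allocate from the target heap; on failure,
  // expand the heap by CodeCacheExpansionSize. If expansion also fails, either
  // fall back to another code heap (segmented cache only) or report a full
  // code cache and return NULL.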

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL && !_heaps->is_empty()) {
    FOR_ALL_HEAPS(heap) {
      CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
      if (result != NULL && result->blob_contains((address)start)) {
        return result;
      }
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}
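
// NMethodIterator and CompiledMethodIterator (declared in codeCache.hpp) walk
// method CodeBlobs across all method code heaps; next_alive() additionally
// skips zombie and unloaded methods, as illustrated by the loops below.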

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false; // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
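  // Note on the macro trickery below: in product builds the NOT_DEBUG guard
  // makes the whole loop conditional on needs_cache_clean(), while in debug
  // builds the loop always runs (so every alive method gets verified) and the
  // DEBUG_ONLY check guards just the inline-cache cleanup.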
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while (iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}
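
// The sweeper uses this ratio to scale its aggressiveness: for example, with
// only 10% of a heap free the ratio is 10, and (as noted in allocate() above)
// stack scanning is forced once less than 10% of a code heap is free.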

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that would fail; instead,
  // round the code cache expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}
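
// VM_Deoptimize is a VM operation executed at a safepoint by the VMThread; it
// deoptimizes all activations that depend on the marked nmethods and then makes
// the marked methods not entrant (the HOTSWAP path below inlines the same work,
// as its own comments note).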

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
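
// Each CodeBlob is preceded by a HeapBlock header and occupies a whole number
// of CodeCacheSegmentSize segments; the per-blob waste summed below is the
// difference between that rounded-up footprint and the blob's actual size.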

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_HEAPS(heap) {
    if ((_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive()) { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive()) { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie()) { nmethodZombie++; }
        if (nm->is_unloaded()) { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr(" #blobs = %d", number_of_blobs);
    tty->print_cr(" code size = %d", code_size);
    tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
    tty->print_cr(" map size = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char *method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

// Initialize iterator to given compiled method
void CompiledMethodIterator::initialize(CompiledMethod* cm) {
  _code_blob = (CodeBlob*)cm;
  if (!SegmentedCodeCache) {
    // Iterate over all CodeBlobs
    _code_blob_type = CodeBlobType::All;
  } else if (cm != NULL) {
    _code_blob_type = CodeCache::get_code_blob_type(cm);
  } else {
    // Only iterate over method code heaps, starting with non-profiled
    _code_blob_type = CodeBlobType::MethodNonProfiled;
  }
}

// Advance iterator to the next compiled method in the current code heap
bool CompiledMethodIterator::next_compiled_method() {
  // Get first method CodeBlob
  if (_code_blob == NULL) {
    _code_blob = CodeCache::first_blob(_code_blob_type);
    if (_code_blob == NULL) {
      return false;
    } else if (_code_blob->is_nmethod()) {
      return true;
    }
  }
  // Search for next method CodeBlob
  _code_blob = CodeCache::next_blob(_code_blob);
  while (_code_blob != NULL && !_code_blob->is_compiled()) {
    _code_blob = CodeCache::next_blob(_code_blob);
  }
  return _code_blob != NULL;
}
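
// Typical usage (mirrors the loops throughout this file):
//
//   CompiledMethodIterator iter;
//   while (iter.next_alive()) {
//     iter.method()->cleanup_inline_caches();
//   }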