/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }
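
  // Illustration (editor's note, not part of the original sources): with the
  // hypothetical values ReservedCodeCacheSize = 128 MB and an ergonomically
  // adjusted NonNMethodCodeHeapSize of 8 MB, the split above leaves 120 MB,
  // giving ProfiledCodeHeapSize = 60 MB and NonProfiledCodeHeapSize = 60 MB.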

  // We do not need the profiled CodeHeap; use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap; use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
  size_t profiled_size = align_size_down(ProfiledCodeHeapSize, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
  ReservedSpace non_method_space   = rs.first_part(non_method_size);
  ReservedSpace rest               = rs.last_part(non_method_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
      os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
      os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}
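
// Illustration (editor's note, not part of the original sources): as I read
// page_size_for_region_unaligned(size, 8), it picks the largest supported page
// size that still fits at least 8 pages in the region. With a hypothetical
// 48 MB ReservedCodeCacheSize and 2 MB large pages, 48 MB / 8 = 6 MB >= 2 MB,
// so the heaps would be aligned to the 2 MB large-page size.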

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
           os::page_size_for_region_aligned(size, 8)) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
      MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::mode() == Arguments::_int) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch (code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}
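
// Illustration (editor's summary of heap_available() above, not part of the
// original sources): which CodeHeaps exist in each VM configuration:
//   -XX:-SegmentedCodeCache       -> All (single heap)
//   -Xint                         -> NonNMethod only
//   tiered (stop level > simple)  -> NonNMethod, MethodProfiled, MethodNonProfiled
//   non-tiered                    -> NonNMethod, MethodNonProfiled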

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && !strict) {
        // Fallback solution: Try to store code in another code heap.
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          strict = false;   // Allow recursive search for other heaps
          break;
        case CodeBlobType::MethodProfiled:
          type = CodeBlobType::MethodNonProfiled;
          strict = true;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          strict = true;
          break;
        }
        if (heap_available(type)) {
          return allocate(size, type, strict);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}
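
// Illustration (editor's summary of the fallback logic in allocate() above,
// not part of the original sources): when a segmented heap cannot expand,
// allocation falls back as follows before giving up:
//   NonNMethod        -> MethodNonProfiled (non-strict, may fall back once more)
//   MethodProfiled    -> MethodNonProfiled (strict, no further fallback)
//   MethodNonProfiled -> MethodProfiled    (strict, no further fallback)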

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    f(iter.method());
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}
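
// Illustration (editor's note, not part of the original sources): the
// debug-only marking protocol used above works in two passes, e.g.:
//   mark_scavenge_root_nmethods();  // mark every nmethod claiming list membership
//   ... walk the list, clearing the mark on each element encountered ...
//   verify_perm_nmethods(NULL);     // any mark left over indicates a stray entry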

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
  return result;
}
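
// Illustration (editor's note, not part of the original sources): with a
// hypothetical 120 MB code heap of which 12 MB is unallocated, the function
// above returns 120 / 12 = 10; the sweeper treats larger values as higher
// pressure (see the fallback comment in CodeCache::allocate()).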

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure if
  // misaligned. Instead, round the expansion size up to the page size. In
  // particular, Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
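
// Illustration (editor's note, not part of the original sources): the
// dependency-invalidation flow implemented below is, in outline:
//   1. mark_for_deoptimization(...) walks the affected context and marks
//      dependent nmethods (under the CodeCache_lock);
//   2. a VM_Deoptimize operation deoptimizes all activations that depend
//      on marked nmethods;
//   3. the marked nmethods are made not-entrant (or zombie) so they are
//      never entered again.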

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}
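
// Illustration (editor's note, not part of the original sources): the nmethod
// states referenced below, roughly in the order a compiled method moves
// through them:
//   in use -> not-entrant (new activations may no longer enter)
//          -> zombie (no activations remain on any stack; can be reclaimed)
// An nmethod can also become unloaded when classes it references go away.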

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void CodeCache::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object. We use InstanceKlass::mark_dependent_nmethod
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only, not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site());
    if (ctxk == NULL) {
      return; // No dependencies to invalidate yet.
    }
    marked = ctxk->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}
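
// Illustration (editor's note, not part of the original sources): with a
// segmented code cache, a full heap produces warnings assembled from the
// literal format strings below, e.g. for the profiled heap:
//   CodeHeap 'profiled nmethods' is full. Compiler has been disabled.
//   Try increasing the code heap size using -XX:ProfiledCodeHeapSize=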

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_HEAPS(heap) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive())       { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie())      { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive())       { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie())      { nmethodZombie++; }
        if (nm->is_unloaded())    { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 1387 tty->print_cr(" map size = %d", map_size); 1388 } 1389 1390 #endif // !PRODUCT 1391 } 1392 1393 void CodeCache::print_summary(outputStream* st, bool detailed) { 1394 FOR_ALL_HEAPS(heap_iterator) { 1395 CodeHeap* heap = (*heap_iterator); 1396 size_t total = (heap->high_boundary() - heap->low_boundary()); 1397 if (SegmentedCodeCache) { 1398 st->print("%s:", heap->name()); 1399 } else { 1400 st->print("CodeCache:"); 1401 } 1402 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT 1403 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", 1404 total/K, (total - heap->unallocated_capacity())/K, 1405 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K); 1406 1407 if (detailed) { 1408 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", 1409 p2i(heap->low_boundary()), 1410 p2i(heap->high()), 1411 p2i(heap->high_boundary())); 1412 } 1413 } 1414 1415 if (detailed) { 1416 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT 1417 " adapters=" UINT32_FORMAT, 1418 nof_blobs(), nof_nmethods(), nof_adapters()); 1419 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ? 1420 "enabled" : Arguments::mode() == Arguments::_int ? 1421 "disabled (interpreter mode)" : 1422 "disabled (not enough contiguous free space left)"); 1423 } 1424 } 1425 1426 void CodeCache::print_codelist(outputStream* st) { 1427 assert_locked_or_safepoint(CodeCache_lock); 1428 1429 NMethodIterator iter; 1430 while(iter.next_alive()) { 1431 nmethod* nm = iter.method(); 1432 ResourceMark rm; 1433 char *method_name = nm->method()->name_and_sig_as_C_string(); 1434 st->print_cr("%d %d %s ["INTPTR_FORMAT", "INTPTR_FORMAT" - "INTPTR_FORMAT"]", 1435 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(), 1436 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); 1437 } 1438 } 1439 1440 void CodeCache::print_layout(outputStream* st) { 1441 assert_locked_or_safepoint(CodeCache_lock); 1442 ResourceMark rm; 1443 1444 print_summary(st, true); 1445 } 1446 1447 void CodeCache::log_state(outputStream* st) { 1448 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1449 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1450 nof_blobs(), nof_nmethods(), nof_adapters(), 1451 unallocated_capacity()); 1452 }