/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size * 100 / total_size,
                  relocation_size * 100 / total_size,
                  code_size * 100 / total_size,
                  stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
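
// Illustrative only (not used by the VM): a minimal sketch of how the two
// iteration macros above combine. The caller must hold the CodeCache_lock or
// be at a safepoint, which first_blob()/next_blob() assert below.
//
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       tty->print_cr("blob @ " INTPTR_FORMAT ", size %d", p2i(cb), cb->size());
//     }
//   }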

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }
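
  // Worked example for the ergonomics above (numbers illustrative, not the
  // actual defaults): with ReservedCodeCacheSize = 240M and a buffer-adjusted
  // NonNMethodCodeHeapSize of 8M, remaining_size = 232M, so the profiled and
  // non-profiled heaps receive 116M each.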

  // If we do not need the profiled CodeHeap, use its space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // If we do not need the non-profiled CodeHeap, use its space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
  size_t profiled_size = align_size_down(ProfiledCodeHeapSize, alignment);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
  ReservedSpace non_method_space = rs.first_part(non_method_size);
  ReservedSpace rest = rs.last_part(non_method_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);
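
  // Note on the split above: last_part(offset) returns everything from
  // 'offset' to the end of the space, so the three spaces tile the
  // reservation as [non-nmethod | profiled | non-profiled] from low to
  // high addresses, matching the layout sketch.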

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
      os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
      os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
           os::page_size_for_region_aligned(size, 8)) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
      MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::mode() == Arguments::_int) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
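
// For reference, the mapping implemented above:
//   SegmentedCodeCache off         -> single heap (CodeBlobType::All)
//   -Xint                          -> non-nmethod heap only
//   tiered, TieredStopAtLevel > 1  -> non-nmethod, profiled and non-profiled heaps
//   otherwise                      -> non-nmethod and non-profiled heaps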

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}

/**
 * Do not seize the CodeCache lock here -- if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && !strict) {
        // Fallback solution: Try to store code in another code heap.
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          strict = false; // Allow recursive search for other heaps
          break;
        case CodeBlobType::MethodProfiled:
          type = CodeBlobType::MethodNonProfiled;
          strict = true;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          strict = true;
          break;
        }
        if (heap_available(type)) {
          return allocate(size, type, strict);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}
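
// Fallback order used by the switch above when a code heap runs full
// (SegmentedCodeCache only):
//   non-nmethod  -> non-profiled  (non-strict, so the search may cascade)
//   profiled     -> non-profiled  (strict)
//   non-profiled -> profiled      (strict)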

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it always does as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    f(iter.method());
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
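      // (Whether this happens once or repeatedly is the closure's choice;
      // GC root-processing closures typically claim each nmethod, e.g. via
      // nmethod::test_set_oops_do_mark(), so oops are visited only once
      // per cycle.)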
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false; // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0.
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure; instead, round
  // the code cache to the page size. In particular, Solaris is moving to a larger
  // default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
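  //
  // Example: an nmethod compiled under the class-hierarchy-analysis
  // assumption that an interface has a unique implementor records a
  // dependency on that fact; loading a second implementor invalidates
  // the dependency, so the nmethod must be marked and deoptimized.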

  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.
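
      // Rough life-cycle sketch (the sweeper drives the later transitions):
      //   in_use -> not_entrant -> zombie -> unloaded/flushed
      // where can_not_entrant_be_converted() checks, roughly, that no
      // activation of this nmethod can still be on a thread's stack.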

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different from the given MethodHandle.
void CodeCache::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object. We use InstanceKlass::mark_dependent_nmethod
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only, not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* ctxk = MethodHandles::get_call_site_context(call_site());
    if (ctxk == NULL) {
      return; // No dependencies to invalidate yet.
    }
    marked = ctxk->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_HEAPS(heap) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i = 0; i < bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size += set->size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 1386 tty->print_cr(" map size = %d", map_size); 1387 } 1388 1389 #endif // !PRODUCT 1390 } 1391 1392 void CodeCache::print_summary(outputStream* st, bool detailed) { 1393 FOR_ALL_HEAPS(heap_iterator) { 1394 CodeHeap* heap = (*heap_iterator); 1395 size_t total = (heap->high_boundary() - heap->low_boundary()); 1396 if (SegmentedCodeCache) { 1397 st->print("%s:", heap->name()); 1398 } else { 1399 st->print("CodeCache:"); 1400 } 1401 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT 1402 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", 1403 total/K, (total - heap->unallocated_capacity())/K, 1404 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K); 1405 1406 if (detailed) { 1407 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", 1408 p2i(heap->low_boundary()), 1409 p2i(heap->high()), 1410 p2i(heap->high_boundary())); 1411 } 1412 } 1413 1414 if (detailed) { 1415 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT 1416 " adapters=" UINT32_FORMAT, 1417 nof_blobs(), nof_nmethods(), nof_adapters()); 1418 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ? 1419 "enabled" : Arguments::mode() == Arguments::_int ? 1420 "disabled (interpreter mode)" : 1421 "disabled (not enough contiguous free space left)"); 1422 } 1423 } 1424 1425 void CodeCache::print_codelist(outputStream* st) { 1426 assert_locked_or_safepoint(CodeCache_lock); 1427 1428 NMethodIterator iter; 1429 while(iter.next_alive()) { 1430 nmethod* nm = iter.method(); 1431 ResourceMark rm; 1432 char *method_name = nm->method()->name_and_sig_as_C_string(); 1433 st->print_cr("%d %d %s ["INTPTR_FORMAT", "INTPTR_FORMAT" - "INTPTR_FORMAT"]", 1434 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(), 1435 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); 1436 } 1437 } 1438 1439 void CodeCache::print_layout(outputStream* st) { 1440 assert_locked_or_safepoint(CodeCache_lock); 1441 ResourceMark rm; 1442 1443 print_summary(st, true); 1444 } 1445 1446 void CodeCache::log_state(outputStream* st) { 1447 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1448 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1449 nof_blobs(), nof_nmethods(), nof_adapters(), 1450 unallocated_capacity()); 1451 }