/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
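// Usage sketch (illustrative only; this mirrors what CodeCache::print() does
// further down, using the iteration macros defined below):
//   CodeBlob_sizes live;
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       if (cb->is_alive()) live.add(cb);
//     }
//   }
//   if (!live.is_empty()) live.print("live");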
// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }
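  // Worked example (illustrative numbers, not computed here): with
  // -XX:ReservedCodeCacheSize=240M and a NonNMethodCodeHeapSize of 8M after
  // adding the compiler buffers, the split above yields
  //   remaining_size          = 240M - 8M = 232M
  //   ProfiledCodeHeapSize    = 116M
  //   NonProfiledCodeHeapSize = 116M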
  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align reserved sizes of CodeHeaps
  size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
  size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
  ReservedSpace non_method_space   = rs.first_part(non_method_size);
  ReservedSpace rest               = rs.last_part(non_method_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", init_non_method_size, CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
}
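// Example flags (illustrative): instead of relying on the ergonomics above,
// the three heap sizes can be set explicitly on the command line, e.g.
//   -XX:+SegmentedCodeCache -XX:NonNMethodCodeHeapSize=8m \
//   -XX:ProfiledCodeHeapSize=116m -XX:NonProfiledCodeHeapSize=116m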
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
           os::page_size_for_region(size, 8)) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
      MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::mode() == Arguments::_int) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_initial = round_to(size_initial, os::vm_page_size());

  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}
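// Traversal sketch (illustrative): first_blob()/next_blob() are normally
// driven through the FOR_ALL_HEAPS / FOR_ALL_BLOBS macros defined above, e.g.
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       // inspect cb while holding CodeCache_lock or at a safepoint
//     }
//   }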
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
        // Fallback solution: Store non-nmethod code in the non-profiled code heap
        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}
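// Usage sketch (illustrative, simplified from what CodeBlob factory methods
// do): take CodeCache_lock, allocate, construct in place, then commit:
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* cb = CodeCache::allocate(blob_size, CodeBlobType::NonNMethod, false);
//   // placement-construct a CodeBlob subclass into cb, then CodeCache::commit(cb)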
// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    f(iter.method());
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}
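// Iteration sketch (illustrative): the NMethodIterator pattern used above, and
// throughout the rest of this file, visits nmethods across all code heaps:
//   NMethodIterator iter;
//   while (iter.next_alive()) {
//     nmethod* nm = iter.method();
//     // process nm; the caller holds CodeCache_lock or is at a safepoint
//   }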
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
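// Lifecycle sketch (illustrative): other parts of the VM maintain this list
// roughly as
//   CodeCache::add_scavenge_root_nmethod(nm);   // nm found to hold scavengable oops
//   CodeCache::drop_scavenge_root_nmethod(nm);  // nm flushed or made zombie
// while prune_scavenge_root_nmethods(), called from gc_epilogue() below, trims
// entries that no longer hold scavengable oops.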
#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
 */
bool CodeCache::is_full(int* code_blob_type) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = *heap_iterator;
    // Do not check the non-nmethod code heap because we can store
    // non-nmethod code in the non-profiled code heap (see comment
    // 'fallback solution' in CodeCache::allocate).
    if (heap->code_blob_type() != CodeBlobType::NonNMethod &&
        heap->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
      *code_blob_type = heap->code_blob_type();
      return true;
    }
  }
  return false;
}
/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)heap->max_capacity();
  return max_capacity / unallocated_capacity;
}
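// Worked example (illustrative numbers): with max_capacity = 100M, 24M of
// unallocated space and CodeCacheMinimumFreeSpace = 500K, the result is
// roughly 100M / 23.5M ~= 4.3, i.e. about a quarter of the heap is free.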
size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that would cause a failure;
  // instead, round the expansion size up to the page size. In particular, Solaris
  // is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", InitialCodeCacheSize, CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class,
  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}
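// Usage sketch (illustrative): dependency-driven deoptimization typically
// pairs a marking pass with a later step that disarms the marked code, e.g.
//   int marked = CodeCache::mark_for_deoptimization(dependee);
//   if (marked > 0) {
//     // at a safepoint:
//     CodeCache::make_marked_nmethods_not_entrant();
//   }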
void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=",
              (code_blob_type == CodeBlobType::MethodProfiled) ? "ProfiledCodeHeapSize" : "NonProfiledCodeHeapSize");
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}
void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_HEAPS(heap) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }
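  // Bucketing example (illustrative): with bucketSize = 512, an nmethod of
  // 1300 bytes lands in buckets[2], i.e. the "1024 - 1536 bytes" row below.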
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size           += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}
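// Illustrative output of print_summary() below (all values made up), for a
// single, non-segmented code heap without the detailed section:
//   CodeCache: size=245760Kb used=5120Kb max_used=5632Kb free=240640Kb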
1275 "disabled (interpreter mode)" : 1276 "disabled (not enough contiguous free space left)"); 1277 } 1278 } 1279 1280 void CodeCache::print_codelist(outputStream* st) { 1281 assert_locked_or_safepoint(CodeCache_lock); 1282 1283 NMethodIterator iter; 1284 while(iter.next_alive()) { 1285 nmethod* nm = iter.method(); 1286 ResourceMark rm; 1287 char *method_name = nm->method()->name_and_sig_as_C_string(); 1288 st->print_cr("%d %d %s ["INTPTR_FORMAT", "INTPTR_FORMAT" - "INTPTR_FORMAT"]", 1289 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(), 1290 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); 1291 } 1292 } 1293 1294 void CodeCache::print_layout(outputStream* st) { 1295 assert_locked_or_safepoint(CodeCache_lock); 1296 ResourceMark rm; 1297 1298 print_summary(st, true); 1299 } 1300 1301 void CodeCache::log_state(outputStream* st) { 1302 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1303 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1304 nof_blobs(), nof_nmethods(), nof_adapters(), 1305 unallocated_capacity()); 1306 }