/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()      { return total_size; }
  bool is_empty()  { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
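
// Typical usage (mirrors CodeCache::print() further below): accumulate
// statistics for a set of blobs via add(), then emit a one-line summary, e.g.
//   CodeBlob_sizes live;  ... live.add(cb); ...  live.print("live");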

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }
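
  // Illustrative sizing (hypothetical numbers, not taken from this file):
  // with ReservedCodeCacheSize = 240M and a NonNMethodCodeHeapSize of 7M
  // after adding the compiler buffers, the remaining 233M is split evenly
  // between the profiled and non-profiled heaps (about 116.5M each).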

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align reserved sizes of CodeHeaps
  size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
  size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
  ReservedSpace non_method_space   = rs.first_part(non_method_size);
  ReservedSpace rest               = rs.last_part(non_method_size);
  ReservedSpace profiled_space     = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", init_non_method_size, CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
}
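
// A sketch of the alignment logic in reserve_heap_memory below (illustrative,
// platform-dependent numbers): with 4K pages and a 64K allocation
// granularity, r_align is 64K and the requested size is rounded up to a 64K
// multiple; if large pages (e.g. 2M) can hold executable code, r_align
// becomes 2M instead.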
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
               os::page_size_for_region(size, 8)) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if ((Arguments::mode() == Arguments::_int) ||
             (TieredStopAtLevel == CompLevel_none)) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
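
// Summary of the mapping implemented above:
//   !SegmentedCodeCache              -> CodeBlobType::All only
//   -Xint or TieredStopAtLevel = 0   -> NonNMethod only
//   tiered compilation above level 1 -> NonNMethod, MethodProfiled, MethodNonProfiled
//   otherwise                        -> NonNMethod and MethodNonProfiled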
void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_initial = round_to(size_initial, os::vm_page_size());

  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
        // Fallback solution: Store non-nmethod code in the non-profiled code heap.
        // Note that in the sweeper, we check the reverse_free_ratio of the non-profiled
        // code heap and force stack scanning if less than 10% of the code heap is free.
        return allocate(size, CodeBlobType::MethodNonProfiled);
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}
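
// Typical lookup from a PC inside generated code (illustrative):
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) { /* PC is in compiled Java code */ }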
// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    f(iter.method());
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod()) {
          ((nmethod*)cb)->verify_scavenge_root_oops();
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list()) {
      nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list()) {
      call_f = false;  // don't show this one to the client
    }
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}
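
// Note on reverse_free_ratio below: although it takes a code_blob_type, the
// ratio is computed from unallocated_capacity() and max_capacity(), which
// both sum over all heaps rather than the single matching heap.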
/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
  double unallocated_capacity = (double)CodeCache::unallocated_capacity() + 1; // Avoid division by 0
  double max_capacity = CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure. Instead,
  // round the code cache expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", InitialCodeCacheSize, CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
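
// Deoptimization marking, in outline: when a class load or redefinition
// invalidates a dependency, the affected nmethods are marked by the
// functions below and later made not-entrant (or zombie) at a safepoint by
// make_marked_nmethods_not_entrant() / make_marked_nmethods_zombies().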
int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}
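
// For reference, the nmethod state progression assumed below: an alive
// nmethod is first made not-entrant (no new activations may enter), becomes
// a zombie once no activations remain on any stack, and is eventually
// flushed from the code cache by the sweeper.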
void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=",
              (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_HEAPS(heap) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char* method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive())       { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie())      { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive())       { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie())      { nmethodZombie++; }
        if (nm->is_unloaded())    { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }
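
  // Each bucket i counts Java nmethods whose total size falls within
  // [i*bucketSize, (i+1)*bucketSize) bytes; the histogram is printed below.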

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size           += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}
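
// Example (illustrative values) of a summary line produced for one heap:
//   CodeCache: size=245760Kb used=1366Kb max_used=1943Kb free=244393Kb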
1267 "disabled (interpreter mode)" : 1268 "disabled (not enough contiguous free space left)"); 1269 } 1270 } 1271 1272 void CodeCache::print_codelist(outputStream* st) { 1273 assert_locked_or_safepoint(CodeCache_lock); 1274 1275 NMethodIterator iter; 1276 while(iter.next_alive()) { 1277 nmethod* nm = iter.method(); 1278 ResourceMark rm; 1279 char *method_name = nm->method()->name_and_sig_as_C_string(); 1280 st->print_cr("%d %d %s ["INTPTR_FORMAT", "INTPTR_FORMAT" - "INTPTR_FORMAT"]", 1281 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(), 1282 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); 1283 } 1284 } 1285 1286 void CodeCache::print_layout(outputStream* st) { 1287 assert_locked_or_safepoint(CodeCache_lock); 1288 ResourceMark rm; 1289 1290 print_summary(st, true); 1291 } 1292 1293 void CodeCache::log_state(outputStream* st) { 1294 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1295 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1296 nof_blobs(), nof_nmethods(), nof_adapters(), 1297 unallocated_capacity()); 1298 }