/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
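// Usage sketch for the iteration macros above (illustrative only, not a
// call site from this file): walk every blob in every heap while holding
// the CodeCache_lock, e.g.
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       ... // cb is a CodeBlob*
//     }
//   }
// Note that FOR_ALL_HEAPS binds 'heap' to a GrowableArrayIterator, so it
// must be dereferenced (*heap) to obtain the CodeHeap*.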
address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-method code heap
    if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
      // Use the default value for NonMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-method heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-method CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
  if (NonMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-method code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align reserved sizes of CodeHeaps
  size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
  size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size);
  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-methods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
  ReservedSpace non_method_space = rs.first_part(non_method_size);
  ReservedSpace rest = rs.last_part(non_method_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-methods (stubs, adapters, ...)
  add_heap(non_method_space, "non-methods", init_non_method_size, CodeBlobType::NonMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
}
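// Worked example of the default split above (illustrative numbers only,
// not the actual flag defaults): with -XX:ReservedCodeCacheSize=240M and
// an ergonomically adjusted NonMethodCodeHeapSize of 8M,
//   remaining_size          = 240M - 8M   = 232M
//   ProfiledCodeHeapSize    = 232M / 2    = 116M
//   NonProfiledCodeHeapSize = 232M - 116M = 116M
// so the two method heaps split the remainder evenly, and rounding loss
// from the integer division goes to the non-profiled heap.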
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(InitialCodeCacheSize, size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if ((Arguments::mode() == Arguments::_int) ||
             (TieredStopAtLevel == CompLevel_none)) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-method and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
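// Summary of the cases above (sketch derived from the branches): which
// code heaps exist per VM configuration.
//   configuration                          NonMethod  Profiled  NonProfiled
//   -XX:-SegmentedCodeCache                (single CodeBlobType::All heap)
//   interpreter only (-Xint / level none)     yes        no         no
//   tiered, TieredStopAtLevel > simple        yes        yes        yes
//   non-tiered (or stopped at level 1)        yes        no         yes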
void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_initial = round_to(size_initial, os::vm_page_size());

  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}

CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonMethod)) {
        // Fallback solution: Store non-method code in the non-profiled code heap
        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("Code heap '%s'", heap->name());
      } else {
        tty->print("Code cache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}
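// Typical call pattern for allocate() (illustrative sketch only, not an
// actual call site; 'SomeBlobSubclass' is hypothetical): the caller takes
// CodeCache_lock first and immediately constructs the blob in the
// returned storage, which is why the comment above insists the lock is
// already held -- until the constructor runs, the heap contains an
// uninitialized blob.
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* p = CodeCache::allocate(size, CodeBlobType::NonMethod, false);
//   if (p != NULL) {
//     new (p) SomeBlobSubclass(...);  // hypothetical placement construction
//   }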
void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}
// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    f(iter.method());
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
 */
bool CodeCache::is_full(int* code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
      *code_blob_type = (*heap)->code_blob_type();
      return true;
    }
  }
  return false;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)heap->max_capacity();
  return max_capacity / unallocated_capacity;
}
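// Worked example for reverse_free_ratio() (illustrative numbers only):
// with max_capacity = 100M, unallocated_capacity = 25M and, say, a
// CodeCacheMinimumFreeSpace of 0.5M, the result is
//   100M / (25M - 0.5M) = ~4.08
// i.e. roughly 4 when a quarter of the heap is free, matching the doc
// comment above. The ratio grows without bound as free space shrinks
// toward the minimum reserve.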
size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure; instead,
  // round the code cache expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "Code heap", InitialCodeCacheSize, CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.
      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=",
              (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,      freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT
void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_HEAPS(heap) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- Code heap '%s' --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);
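  // Bucketing sketch (illustrative): an nmethod of size s is counted in
  // bucket s / bucketSize, so with bucketSize = 512 a 1300-byte nmethod
  // lands in bucket 2 and prints as the "1024 - 1536 bytes" row below.
  // bucketLimit = max_nm_size / bucketSize + 1 guarantees the largest
  // nmethod still falls inside the array.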
  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size           += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (SegmentedCodeCache) {
      st->print("CodeHeap '%s':", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
1263 "disabled (interpreter mode)" : 1264 "disabled (not enough contiguous free space left)"); 1265 } 1266 } 1267 1268 void CodeCache::print_codelist(outputStream* st) { 1269 assert_locked_or_safepoint(CodeCache_lock); 1270 1271 NMethodIterator iter; 1272 while(iter.next_alive()) { 1273 nmethod* nm = iter.method(); 1274 ResourceMark rm; 1275 char *method_name = nm->method()->name_and_sig_as_C_string(); 1276 st->print_cr("%d %d %s ["INTPTR_FORMAT", "INTPTR_FORMAT" - "INTPTR_FORMAT"]", 1277 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(), 1278 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end()); 1279 } 1280 } 1281 1282 void CodeCache::print_layout(outputStream* st) { 1283 assert_locked_or_safepoint(CodeCache_lock); 1284 ResourceMark rm; 1285 1286 print_summary(st, true); 1287 } 1288 1289 void CodeCache::log_state(outputStream* st) { 1290 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 1291 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", 1292 nof_blobs(), nof_nmethods(), nof_adapters(), 1293 unallocated_capacity()); 1294 }