/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"


// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }
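
  // Accumulate the sizes of one CodeBlob. For nmethods the instruction,
  // stub, oop, metadata, scope-data and pc-desc sections are broken out
  // separately; for all other blobs only the raw code size is counted.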
  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;
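
// Allocate a blob of at least the requested size from the code heap,
// expanding the committed region in CodeCacheExpansionSize increments as
// needed; NULL is returned only when expansion fails (code cache full).
// Critical allocations (is_critical) may draw on the space held back by
// CodeCacheMinimumFreeSpace.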
CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob* var = first();       var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob* var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod* var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))


bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
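// An illustrative (hypothetical) use is mapping a PC gathered during a stack
// walk back to its enclosing blob:
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) { /* compiled Java code */ }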
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non-entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
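
// Prepend nm to the singly-linked scavenge-root list. Not used under G1
// (note the early return), which tracks nmethod roots with per-region code
// root sets instead.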
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}
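
// The mark/verify pair works as a consistency check: mark_scavenge_root_nmethods()
// temporarily marks every nmethod that claims list membership, the list walks
// above clear the mark for each entry actually found, and verify_perm_nmethods()
// then asserts that no stray marks remain (a leftover mark would mean a claimed
// entry was missing from the list).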
// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      nm->verify_clean_inline_caches();
      nm->verify();
    }
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      count += nm->verify_icholder_relocations();
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    FOR_ALL_ALIVE_BLOBS(cb) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        assert(!nm->is_unloaded(), "Tautology");
        DEBUG_ONLY(if (needs_cache_clean())) {
          nm->cleanup_inline_caches();
        }
        DEBUG_ONLY(nm->verify());
        DEBUG_ONLY(nm->verify_oop_relocations());
      }
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();
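
// One-time setup of the code cache: sanity-check the segment size against the
// required alignments, round the sizing flags to the page size, reserve the
// code heap, and hook it into the memory service and OS-level facilities.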
void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that caused failures;
  // instead, round the code cache sizes to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }
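
  // With -XX:+VerifyDependencies, independently re-check every live nmethod's
  // dependencies and report any that should have been marked above but were not.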
  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}
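
// Per-blob allocation/free tracing, gated on the PrintCodeCache2 flag.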
void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char* method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive())       { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie())      { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive())         { nmethodAlive++; }
      if (nm->is_not_entrant())   { nmethodNotEntrant++; }
      if (nm->is_zombie())        { nmethodZombie++; }
      if (nm->is_unloaded())      { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT
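
// Print a code cache summary; in non-product builds with -XX:+Verbose, also
// break down live vs. dead blob sizes and, in WizardMode, OopMap usage.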
void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %f, per dependent %f", dependentCheckTime.seconds(),
                dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 p2i(_heap->low_boundary()),
                 p2i(_heap->high()),
                 p2i(_heap->high_boundary()));
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}