/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size      * 100 / total_size,
                  relocation_size  * 100 / total_size,
                  code_size        * 100 / total_size,
                  stub_size        * 100 / total_size,
                  scopes_oop_size  * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size  * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
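
// Usage sketch (illustrative): CodeBlob_sizes is a plain accumulator; a
// typical client default-constructs one, add()s each blob of interest, and
// prints a one-line summary. This is the pattern CodeCache::print() uses
// below, via the FOR_ALL_BLOBS iteration macro defined later in this file:
//
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(p) {
//     if (p->is_alive())  live.add(p);
//   }
//   if (!live.is_empty())  live.print("live");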

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;


CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}
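
// Allocation contract sketch (descriptive, mirrors allocate() above): callers
// must already hold CodeCache_lock or be at a safepoint, and must tolerate a
// NULL result once the heap can no longer be expanded:
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* cb = CodeCache::allocate(size);
//   if (cb == NULL) {
//     // code cache is full; expand_by(CodeCacheExpansionSize) failed
//   }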

void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob* var =       first();  var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob* var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod*  var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))


bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}
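
// Iteration sketch (illustrative): the FOR_ALL_* macros above expand to simple
// cursor loops over the code heap, so ad-hoc walkers follow the same shape,
// e.g. counting live nmethods while holding CodeCache_lock:
//
//   int live_nmethods = 0;
//   FOR_ALL_ALIVE_NMETHODS(nm) { live_nmethods++; }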

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}
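
// List-manipulation sketch (descriptive): _scavenge_root_nmethods is a
// singly-linked list threaded through nmethod::scavenge_root_link(), so
// add_scavenge_root_nmethod() is an O(1) push-front, while drop and prune
// use the classic last/cur walk shown above, re-linking either 'last' or the
// list head when an element is removed.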

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT


nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected\n");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(methodOop(m));
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  // For the concurrent sweeper this will be called with CodeCache_lock taken by the caller
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(methodOop(nm->method()));
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
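
// Life-cycle sketch (descriptive, mirrors the code above): a speculatively
// disconnected nmethod is pushed onto _saved_nmethods and its methodOop's
// code pointer is cleared; later it is either reconnected via
// find_and_remove_saved_code() or taken off the list for good via
// remove_saved_code().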


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, which would cause a
  // failure at startup; instead, round the code cache sizes up to the page
  // size. In particular, Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize   = round_to(InitialCodeCacheSize,   os::vm_page_size());
  ReservedCodeCacheSize  = round_to(ReservedCodeCacheSize,  os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give the OS a chance to register the generated code area.
  // This is used on 64-bit Windows platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT
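
// Invalidation sketch (descriptive): the mark_for_deoptimization() variants
// below only *mark* affected nmethods and return how many were marked; a
// caller that sees a non-zero count is expected to follow up with
// make_marked_nmethods_not_entrant() or, at a safepoint,
// make_marked_nmethods_zombies() to actually take the code out of use.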

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop d = str.klass();
      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  objArrayOop old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    methodOop old_method = (methodOop) old_methods->obj_at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined methodOop
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}
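
// Safepoint note (descriptive): make_marked_nmethods_zombies() below asserts
// that it runs at a safepoint because it may turn not-entrant code directly
// into a zombie, which is only safe when no thread can still be executing
// that code; the make_marked_nmethods_not_entrant() variant merely requires
// the usual lock-or-safepoint discipline.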

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately,
      // but we can't tell, because we don't track it on the stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}
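
// Histogram sketch (illustrative): print_internals() below buckets java
// nmethods by instruction size in 512-byte steps, so an nmethod with
// insts_size() == 1300 lands in bucket 1300 / 512 == 2 and is reported on
// the "1024 - 1536 bytes" row.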
tty->print_cr("\tunloaded: %d",nmethodUnloaded); 861 tty->print_cr("\tjava: %d",nmethodJava); 862 tty->print_cr("\tnative: %d",nmethodNative); 863 tty->print_cr("runtime_stubs: %d",runtimeStubCount); 864 tty->print_cr("adapters: %d",adapterCount); 865 tty->print_cr("buffer blobs: %d",bufferBlobCount); 866 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); 867 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); 868 tty->print_cr("\nnmethod size distribution (non-zombie java)"); 869 tty->print_cr("-------------------------------------------------"); 870 871 for(int i=0; i<bucketLimit; i++) { 872 if(buckets[i] != 0) { 873 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize); 874 tty->fill_to(40); 875 tty->print_cr("%d",buckets[i]); 876 } 877 } 878 879 FREE_C_HEAP_ARRAY(int, buckets); 880 } 881 882 void CodeCache::print() { 883 CodeBlob_sizes live; 884 CodeBlob_sizes dead; 885 886 FOR_ALL_BLOBS(p) { 887 if (!p->is_alive()) { 888 dead.add(p); 889 } else { 890 live.add(p); 891 } 892 } 893 894 tty->print_cr("CodeCache:"); 895 896 tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(), 897 dependentCheckTime.seconds() / dependentCheckCount); 898 899 if (!live.is_empty()) { 900 live.print("live"); 901 } 902 if (!dead.is_empty()) { 903 dead.print("dead"); 904 } 905 906 907 if (Verbose) { 908 // print the oop_map usage 909 int code_size = 0; 910 int number_of_blobs = 0; 911 int number_of_oop_maps = 0; 912 int map_size = 0; 913 FOR_ALL_BLOBS(p) { 914 if (p->is_alive()) { 915 number_of_blobs++; 916 code_size += p->code_size(); 917 OopMapSet* set = p->oop_maps(); 918 if (set != NULL) { 919 number_of_oop_maps += set->size(); 920 map_size += set->heap_size(); 921 } 922 } 923 } 924 tty->print_cr("OopMaps"); 925 tty->print_cr(" #blobs = %d", number_of_blobs); 926 tty->print_cr(" code size = %d", code_size); 927 tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 928 tty->print_cr(" map size = %d", map_size); 929 } 930 931 } 932 933 #endif // PRODUCT 934 935 void CodeCache::print_bounds(outputStream* st) { 936 st->print_cr("Code Cache [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 937 _heap->low_boundary(), 938 _heap->high(), 939 _heap->high_boundary()); 940 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT 941 " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT "Kb" 942 " largest_free_block=" SIZE_FORMAT, 943 nof_blobs(), nof_nmethods(), nof_adapters(), 944 unallocated_capacity()/K, largest_free_block()); 945 } 946 947 void CodeCache::log_state(outputStream* st) { 948 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" 949 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'" 950 " largest_free_block='" SIZE_FORMAT "'", 951 nof_blobs(), nof_nmethods(), nof_adapters(), 952 unallocated_capacity(), largest_free_block()); 953 }