/*
 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_codeCache.cpp.incl"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()      { return total_size; }
  bool is_empty()  { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size      * 100 / total_size,
                  relocation_size  * 100 / total_size,
                  code_size        * 100 / total_size,
                  stub_size        * 100 / total_size,
                  scopes_oop_size  * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size  * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->code_size();
      stub_size += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size += cb->instructions_size();
    }
  }
};
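// Illustrative sketch (comment only, not compiled into the VM): the accumulator
// above is meant to be used the way CodeCache::print() below uses it -- add()
// every blob into a "live" or "dead" bucket, then print the non-empty buckets:
//
//   CodeBlob_sizes live;
//   CodeBlob_sizes dead;
//   FOR_ALL_BLOBS(p) {                  // macro defined further down in this file
//     if (!p->is_alive()) dead.add(p);
//     else                live.add(p);
//   }
//   if (!live.is_empty()) live.print("live");
//   if (!dead.is_empty()) dead.print("dead");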
// CodeCache implementation

CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;


CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob *cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}


CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies++;
  }
  // flush the hardware I-cache
  ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob *var = first() ; var != NULL; var = next(var) )
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}
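// Illustrative sketch (comment only): the FOR_ALL_* macros above expand to
// plain first()/next() loops, so a typical walker -- example_walk() is a
// hypothetical name, not a real CodeCache entry point -- looks like the
// real walkers further down in this file:
//
//   void example_walk() {
//     assert_locked_or_safepoint(CodeCache_lock);  // iteration requires the lock or a safepoint
//     FOR_ALL_ALIVE_NMETHODS(nm) {
//       // inspect or update nm here
//     }
//   }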
// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark code blobs for unloading if they contain otherwise
// unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    cb->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
      cur->fix_oop_relocations();
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT


nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(methodOop(m));
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(methodOop(nm->method()));
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
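// Note (descriptive summary only, derived from the three functions above):
// _saved_nmethods is a singly linked list, threaded through saved_nmethod_link(),
// of nmethods that were speculatively disconnected. A saved nmethod is either
// reconnected via find_and_remove_saved_code() when its method is needed again,
// or unlinked via remove_saved_code() when it is eventually flushed.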
void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, which caused failures; instead, round
  // the code cache sizes up to the page size. In particular, Solaris is moving to a larger
  // default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}
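// Illustrative note on the rounding done in CodeCache::initialize() above:
// round_to() aligns its first argument up to the next multiple of the second,
// so with a (hypothetical) 4K page size:
//
//   round_to(3000, 4096) == 4096
//   round_to(8192, 4096) == 8192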
//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop d = str.klass();
      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  objArrayOop old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    methodOop old_method = (methodOop) old_methods->obj_at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined methodOop
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.
      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if(nm->is_alive()) { tty->print_cr(" alive"); }
        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if(nm->is_alive()) { nmethodAlive++; }
      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
      if(nm->is_zombie()) { nmethodZombie++; }
      if(nm->is_unloaded()) { nmethodUnloaded++; }
      if(nm->is_native_method()) { nmethodNative++; }

      if(nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if(nm->code_size() > maxCodeSize) {
          maxCodeSize = nm->code_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
  memset(buckets,0,sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if(nm->is_java_method()) {
        buckets[nm->code_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
tty->print_cr("\tunloaded: %d",nmethodUnloaded); 811 tty->print_cr("\tjava: %d",nmethodJava); 812 tty->print_cr("\tnative: %d",nmethodNative); 813 tty->print_cr("runtime_stubs: %d",runtimeStubCount); 814 tty->print_cr("adapters: %d",adapterCount); 815 tty->print_cr("buffer blobs: %d",bufferBlobCount); 816 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); 817 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); 818 tty->print_cr("\nnmethod size distribution (non-zombie java)"); 819 tty->print_cr("-------------------------------------------------"); 820 821 for(int i=0; i<bucketLimit; i++) { 822 if(buckets[i] != 0) { 823 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize); 824 tty->fill_to(40); 825 tty->print_cr("%d",buckets[i]); 826 } 827 } 828 829 FREE_C_HEAP_ARRAY(int, buckets); 830 } 831 832 void CodeCache::print() { 833 CodeBlob_sizes live; 834 CodeBlob_sizes dead; 835 836 FOR_ALL_BLOBS(p) { 837 if (!p->is_alive()) { 838 dead.add(p); 839 } else { 840 live.add(p); 841 } 842 } 843 844 tty->print_cr("CodeCache:"); 845 846 tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(), 847 dependentCheckTime.seconds() / dependentCheckCount); 848 849 if (!live.is_empty()) { 850 live.print("live"); 851 } 852 if (!dead.is_empty()) { 853 dead.print("dead"); 854 } 855 856 857 if (Verbose) { 858 // print the oop_map usage 859 int code_size = 0; 860 int number_of_blobs = 0; 861 int number_of_oop_maps = 0; 862 int map_size = 0; 863 FOR_ALL_BLOBS(p) { 864 if (p->is_alive()) { 865 number_of_blobs++; 866 code_size += p->instructions_size(); 867 OopMapSet* set = p->oop_maps(); 868 if (set != NULL) { 869 number_of_oop_maps += set->size(); 870 map_size += set->heap_size(); 871 } 872 } 873 } 874 tty->print_cr("OopMaps"); 875 tty->print_cr(" #blobs = %d", number_of_blobs); 876 tty->print_cr(" code size = %d", code_size); 877 tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 878 tty->print_cr(" map size = %d", map_size); 879 } 880 881 } 882 883 #endif // PRODUCT