/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
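      // Debug-info sections recorded by the compiler: scope data and the
      // pc-descriptor table used to map compiled pcs back to bytecodes.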
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// CodeCache implementation

CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob *cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
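  //
  // Allocation strategy: try the code heap first; on failure, expand the
  // committed part of the reserved region by CodeCacheExpansionSize and
  // retry, until the allocation succeeds or expansion fails.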
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
#define FOR_ALL_NMETHODS(var)       for (nmethod *var = first_nmethod(); var != NULL; var = next_nmethod(var))


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
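//
// Typical use (sketch): map an arbitrary pc in compiled code back to its
// owning blob, e.g. during stack walking:
//
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;
//     ...
//   }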
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
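  // (verify_perm_nmethods(NULL) walks all alive blobs and asserts that no
  // scavenge-root mark from mark_scavenge_root_nmethods survives.)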
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
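// An nmethod passed to the closure is therefore alive, not on the
// scavenge-root list, and has had its scavenge-root oops verified.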
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      nm->verify_clean_inline_caches();
      nm->verify();
    }
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      count += nm->verify_icholder_relocations();
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      DEBUG_ONLY(nm->verify_oop_relocations());
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that could cause startup
  // failure; instead, round the code cache sizes up to the page size.
  // In particular, Solaris is moving to a larger default page size.
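  // For example (assuming a 4K page): a 32K expansion size is already
  // page-aligned and stays 32K, while a 33K request would round up to 36K.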
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
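      // (Typically a method of the evolving class, marked in the loop above.)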
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
    wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelist_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelist()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

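// Overhead arithmetic used above: the allocator hands out whole segments, and
// heap_block->length() counts the segments backing a blob (header included),
// so the per-blob waste is length * CodeCacheSegmentSize - cb->size().
// E.g., with 64-byte segments, a blob needing 1000 bytes of segment space
// occupies 16 segments = 1024 bytes, wasting 24.
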
//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive())       { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie())      { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive())       { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie())      { nmethodZombie++; }
      if (nm->is_unloaded())    { nmethodUnloaded++; }
      if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        max_nm_size = MAX2(max_nm_size, nm->size());
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->size() / bucketSize]++;
      }
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
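    // Print only the occupied size buckets.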
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i*bucketSize, (i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 p2i(_heap->low_boundary()),
                 p2i(_heap->high()),
                 p2i(_heap->high_boundary()));
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  assert_locked_or_safepoint(CodeCache_lock);

  FOR_ALL_ALIVE_NMETHODS(p) {
    ResourceMark rm;
    char *method_name = p->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 p->compile_id(), p->comp_level(), method_name, (intptr_t)p->header_begin(),
                 (intptr_t)p->code_begin(), (intptr_t)p->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  assert_locked_or_safepoint(CodeCache_lock);
  ResourceMark rm;

  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}