/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache
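// It accumulates per-section sizes for a set of code blobs (see add() below)
// and prints one summary line per category; the percentages printed by
// print() are relative to the accumulated total_size, so the empty case is
// handled separately.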
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    if (count == 0) {
      tty->print_cr(" #%d %s", count, title);
    } else {
      tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                    count,
                    title,
                    total() / K,
                    header_size          * 100 / total_size,
                    relocation_size      * 100 / total_size,
                    code_size            * 100 / total_size,
                    stub_size            * 100 / total_size,
                    scopes_oop_size      * 100 / total_size,
                    scopes_metadata_size * 100 / total_size,
                    scopes_data_size     * 100 / total_size,
                    scopes_pcs_size      * 100 / total_size);
    }
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;

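// Allocates space for a new CodeBlob. If the initial allocation fails, the
// loop below grows the CodeHeap by CodeCacheExpansionSize and retries;
// NULL is returned only when both allocation and expansion fail.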
CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs
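// The macros below iterate over all blobs, over live blobs only, or over
// live nmethods only. They rely on first()/next()/alive()/alive_nmethod()
// above, which assert that CodeCache_lock is held or that we are at a safepoint.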
#define FOR_ALL_BLOBS(var)          for (CodeBlob* var = first(); var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob* var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod* var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

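// The scavenge root nmethods below form a singly-linked list threaded through
// nmethod::scavenge_root_link(). An nmethod stays on the list while it may
// contain oops that a scavenge has to visit; prune_scavenge_root_nmethods()
// drops entries that no longer do.
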
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

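// Speculative disconnection support: _saved_nmethods is a singly-linked list
// (through nmethod::saved_nmethod_link()) of nmethods that have been detached
// from their Method* by speculatively_disconnect() below. reanimate_saved_code()
// puts such an nmethod back into service; remove_saved_code() discards it for good.
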
/**
 * Remove and return nmethod from the saved code list in order to reanimate it.
 */
nmethod* CodeCache::reanimate_saved_code(Method* m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(m);
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

/**
 * Remove nmethod from the saved code list in order to discard it permanently.
 */
void CodeCache::remove_saved_code(nmethod* nm) {
  // For conc swpr this will be called with CodeCache_lock taken by caller
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(nm->method());
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}

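// GC support. gc_prologue()/gc_epilogue() bracket a collection: the epilogue
// cleans inline caches when a cache clean has been requested, fixes oop
// relocations in live nmethods, and prunes the scavenge root list. In debug
// builds it also checks that no CompiledICHolders have been leaked.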
void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure. Instead, round
  // the code cache to the page size. In particular, Solaris is moving to a larger
  // default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT

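// Marks every nmethod whose dependencies are invalidated by the given class
// hierarchy change and returns the number of nmethods marked. The caller is
// still responsible for deoptimizing them, e.g. via
// make_marked_nmethods_not_entrant() below.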
int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive())       { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie())      { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive())         { nmethodAlive++; }
      if (nm->is_not_entrant())   { nmethodNotEntrant++; }
      if (nm->is_zombie())        { nmethodZombie++; }
      if (nm->is_unloaded())      { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

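  // Fill a histogram of nmethod instruction sizes for Java methods, using
  // bucketSize-byte buckets; it is printed after the summary counts below.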
  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT

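// Prints a detailed breakdown of the code cache: interpreter size, per-tier
// sizes of live and dead methods (using CodeBlob_sizes above), the various
// stub categories and, in WizardMode, OopMap statistics.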
void CodeCache::print() {
  print_summary(tty);

  CodeBlob_sizes live_java_methods[CompLevel_full_optimization + 1];
  CodeBlob_sizes live_osr_methods[CompLevel_full_optimization + 1];
  CodeBlob_sizes live_native_methods;

  CodeBlob_sizes dead_java_methods[CompLevel_full_optimization + 1];
  CodeBlob_sizes dead_osr_methods[CompLevel_full_optimization + 1];
  CodeBlob_sizes dead_native_methods;

  CodeBlob_sizes runtime_stubs;
  CodeBlob_sizes deoptimization_stubs;
  CodeBlob_sizes uncommon_trap_stubs;
  CodeBlob_sizes exception_stubs;
  CodeBlob_sizes safepoint_stubs;
  CodeBlob_sizes adapter_blobs;
  CodeBlob_sizes method_handles_adapter_blobs;
  CodeBlob_sizes other_stubs;

  size_t total_live_nmethods = 0;
  size_t total_dead_nmethods = 0;
  size_t total_stubs_size = 0;

  FOR_ALL_BLOBS(p) {
    // live or not-entrant methods
    if (p->is_alive()) {
      if (p->is_nmethod()) {
        total_live_nmethods++;
        // Cannot return NULL, since we checked that it is an nmethod
        nmethod* nm = p->as_nmethod_or_null();
        const int comp_level = nm->comp_level();

        if (nm->is_osr_method()) {
          live_osr_methods[comp_level].add(nm);
        } else if (nm->is_native_method()) {
          live_native_methods.add(nm);
        } else if (nm->is_java_method()) {
          live_java_methods[comp_level].add(nm);
        }
      } else {
        total_stubs_size++;
        if (p->is_runtime_stub()) {
          runtime_stubs.add(p);
        } else if (p->is_deoptimization_stub()) {
          deoptimization_stubs.add(p);
        } else if (p->is_uncommon_trap_stub()) {
          uncommon_trap_stubs.add(p);
        } else if (p->is_exception_stub()) {
          exception_stubs.add(p);
        } else if (p->is_safepoint_stub()) {
          safepoint_stubs.add(p);
        } else if (p->is_adapter_blob()) {
          adapter_blobs.add(p);
        } else if (p->is_method_handles_adapter_blob()) {
          method_handles_adapter_blobs.add(p);
        } else {
          other_stubs.add(p);
        }
      }
    // zombie or unloaded methods
    } else {
      if (p->is_nmethod()) {
        total_dead_nmethods++;
        nmethod* nm = p->as_nmethod_or_null();
        const int comp_level = nm->comp_level();

        if (nm->is_osr_method()) {
          dead_osr_methods[comp_level].add(nm);
        } else if (nm->is_native_method()) {
          dead_native_methods.add(nm);
        } else if (nm->is_java_method()) {
          dead_java_methods[comp_level].add(nm);
        }
      }
    }
  }

  // Tier 0 (interpreter)
  StubQueue* code = AbstractInterpreter::code();
  tty->print("\nInterpreter:");
  tty->print_cr(" total=%dk, used=%dk", code->total_space() / K, code->used_space() / K);

  // Print live methods
  tty->print_cr("\nTotal number of live methods: " SIZE_FORMAT, total_live_nmethods);
  for (int i = 1; i < CompLevel_full_optimization + 1; i++) {
    tty->print_cr(" Tier %d:", i);
    live_java_methods[i].print("Java methods");
    live_osr_methods[i].print("OSR methods");
  }
  tty->print_cr(" Native methods:");
  live_native_methods.print("Native methods");

  // Print dead methods
  tty->print_cr("\nTotal number of dead methods: " SIZE_FORMAT, total_dead_nmethods);
  for (int i = 1; i < CompLevel_full_optimization + 1; i++) {
    tty->print_cr(" Tier %d:", i);
    dead_java_methods[i].print("Java methods");
    dead_osr_methods[i].print("OSR methods");
  }
  tty->print_cr(" Native methods:");
  dead_native_methods.print("Native methods");

  // Print stubs
  tty->print_cr("\nTotal number of stubs: " SIZE_FORMAT, total_stubs_size);
  runtime_stubs.print("runtime");
  deoptimization_stubs.print("deoptimization");
  uncommon_trap_stubs.print("uncommon trap");
  exception_stubs.print("exception");
  safepoint_stubs.print("safepoint");
  adapter_blobs.print("C2I/I2C adapter");
  method_handles_adapter_blobs.print("method handles adapter");
  other_stubs.print("other");

#ifndef PRODUCT
  tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds());
#endif

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }
}

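// Prints a one-line, human-readable occupancy summary; with 'detailed' it also
// shows the heap bounds, the blob counts and whether compilation is currently
// enabled. log_state() below emits similar counters in compile-log attribute form.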
void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 _heap->low_boundary(),
                 _heap->high(),
                 _heap->high_boundary());
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}