/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"


// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

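// Usage sketch (illustrative only; mirrors how CodeCache::print() at the end
// of this file drives this helper): blobs are partitioned into per-group
// accumulators and each non-empty group is printed with a title, e.g.
//
//   CodeBlob_sizes live;
//   FOR_ALL_HEAPS(it) {
//     FOR_ALL_BLOBS(cb, *it) {
//       if (cb->is_alive()) live.add(cb);
//     }
//   }
//   if (!live.is_empty()) live.print("live");
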
// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(it) for (GrowableArrayIterator<CodeHeap*> it = _heaps->begin(); it != _heaps->end(); ++it)
// Iterate over all CodeHeaps containing nmethods
#define FOR_ALL_METHOD_HEAPS(it) for (GrowableArrayFilterIterator<CodeHeap*, IsMethodPredicate> it(_heaps->begin(), IsMethodPredicate()); it != _heaps->end(); ++it)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
// Iterate over all alive CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_ALIVE_BLOBS(cb, heap) for (CodeBlob* cb = first_alive_blob(heap); cb != NULL; cb = next_alive_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (3, true);

void CodeCache::initialize_heaps() {
  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfile)) {
    FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
  }

  // Compute reserved sizes of the CodeHeaps. We have
  //   ReservedCodeCacheSize = non_method_size + NonProfiledCodeHeapSize + ProfiledCodeHeapSize
  // where by default NonProfiledCodeHeapSize is approximately ProfiledCodeHeapSize * 2.
  size_t no_profile_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
  size_t profile_size    = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(ReservedCodeCacheSize - (no_profile_size + profile_size));

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_no_profile_size = MIN2(InitialCodeCacheSize, no_profile_size);
  size_t init_profile_size    = MIN2(InitialCodeCacheSize, profile_size);

  // Reserve one contiguous chunk of memory for the CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-methods
  //    Tier 2 and tier 3 methods
  //    Tier 1 and tier 4 methods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(no_profile_size + profile_size + non_method_size);
  ReservedSpace no_profile_space = rs.first_part(no_profile_size);
  ReservedSpace rest             = rs.last_part(no_profile_size);
  ReservedSpace profile_space    = rest.first_part(profile_size);
  ReservedSpace non_method_space = rest.last_part(profile_size);

  // Tier 1 and tier 4 methods (+ native)
  add_heap(no_profile_space, "Tier 1 and tier 4 methods", init_no_profile_size, CodeBlobType::MethodNoProfile);
  // Tier 2 and tier 3 methods
  add_heap(profile_space, "Tier 2 and tier 3 methods", init_profile_size, CodeBlobType::MethodProfile);
  // Non-methods
  add_heap(non_method_space, "Non-methods", init_non_method_size, CodeBlobType::NonMethod);
}

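// Worked example (illustrative numbers only, assuming the default ~2:1 split
// between the non-profiled and profiled heaps described above): with
//   ReservedCodeCacheSize   = 120M
//   ProfiledCodeHeapSize    =  36M
//   NonProfiledCodeHeapSize =  72M   (~ 2 * ProfiledCodeHeapSize)
// the non-method heap receives the remainder,
//   non_method_size = 120M - (72M + 36M) = 12M,
// with each of the three sizes additionally aligned up by
// ReservedCodeSpace::allocation_align_size_up().
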
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(InitialCodeCacheSize, size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
          MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  guarantee(low_bound() < high_bound(), "Bound check");

  return rs;
}

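// Alignment sketch (illustrative, not authoritative): with 4K small pages and
// large-page execution available at, say, 2M, page_size comes back as 2M, so
// r_align = 2M, the requested size is rounded up to a 2M multiple, and
// rs_align = 2M anchors the reservation itself at a large-page boundary. With
// small pages only, page_size equals os::vm_page_size(), rs_align becomes 0,
// and the OS default placement is used.
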
bool CodeCache::heap_available(int code_blob_type) {
  if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) {
    // Use all heaps for TieredCompilation
    return true;
  } else {
    // Without TieredCompilation we only need the non-profiled heap
    return (code_blob_type == CodeBlobType::MethodNoProfile);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(it) {
    if ((*it)->accepts(code_blob_type)) {
      return (*it);
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (heap != NULL) {
    return (CodeBlob*)heap->first();
  }
  return NULL;
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (heap != NULL) {
    return (CodeBlob*)heap->next(cb);
  }
  return NULL;
}

CodeBlob* CodeCache::first_alive_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first_blob(heap);
  while (cb != NULL && !cb->is_alive()) {
    cb = next_blob(heap, cb);
  }
  return cb;
}

CodeBlob* CodeCache::next_alive_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next_blob(heap, cb);
  while (cb != NULL && !cb->is_alive()) {
    cb = next_blob(heap, cb);
  }
  return cb;
}

CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "Heap exists");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("CodeHeap '%s' extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    heap->name(), (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }

  verify_if_often();
  print_trace("allocation", cb, size);

  return cb;
}

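// Caller pattern (illustrative sketch; the CodeBlob subclasses' operator new
// is the usual entry point): allocation happens with the CodeCache_lock
// already held, e.g.
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* cb = CodeCache::allocate(size, CodeBlobType::NonMethod);
//
// By the time allocate() returns NULL the loop above has already tried to
// grow the heap by CodeCacheExpansionSize, so NULL means the heap is full.
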
void CodeCache::free(CodeBlob* cb, int code_blob_type) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlobType and deallocate
  get_code_heap(code_blob_type)->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(it) {
    if ((*it)->contains(p)) {
      return true;
    }
  }
  return false;
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps->first() == NULL) return NULL;

  FOR_ALL_HEAPS(it) {
    CodeBlob* result = (CodeBlob*) (*it)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

bool CodeCache::contains_nmethod(nmethod* nm) {
  FOR_ALL_METHOD_HEAPS(it) {
    if ((*it)->contains(nm)) {
      return true;
    }
  }
  return false;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      f((nmethod*)cb);
    }
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      f((nmethod*)cb);
    }
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      nm->do_unloading(is_alive, unloading_occurred);
    }
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod()) {
          ((nmethod*)cb)->verify_scavenge_root_oops();
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

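// Shape of the scavenge-root list (descriptive sketch of the code above and
// below): it is an intrusive, singly-linked list threaded through the
// nmethods themselves. The head is _scavenge_root_nmethods and each node
// chains via nm->scavenge_root_link():
//
//   _scavenge_root_nmethods --> nm_a --> nm_b --> ... --> NULL
//
// An nmethod is on the list iff nm->on_scavenge_root_list() is set, which is
// the invariant that add/drop/prune below maintain.
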
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

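// Timing note (grounded in this file): prune_scavenge_root_nmethods() is
// invoked from gc_epilogue() below, so entries that no longer hold
// scavengable oops are dropped once per GC rather than eagerly on every
// nmethod state change.
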
#ifndef PRODUCT

void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list()) {
        nm->set_scavenge_root_marked();
      }
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      bool call_f = (f_or_null != NULL);
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list()) {
        call_f = false;  // don't show this one to the client
      }
      nm->verify_scavenge_root_oops();
      if (call_f) f_or_null->do_code_blob(nm);
    }
  }
}
#endif //PRODUCT

void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(it) {
    cap += (*it)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(it) {
    unallocated_cap += (*it)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(it) {
    max_cap += (*it)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)heap->max_capacity();
  return max_capacity / unallocated_capacity;
}

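// Worked example (illustrative numbers): for a heap with max_capacity = 100M,
// unallocated_capacity = 26M, and CodeCacheMinimumFreeSpace = 1M, the
// effective free space is 25M (1/4 of the heap), so reverse_free_ratio()
// returns ~4. A compilation policy can use such a value to scale compile
// thresholds up as a heap fills (an assumption about the caller; see the
// threshold policy code).
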
void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that would cause a failure;
  // instead, round the expansion size up to the page size. In particular, Solaris
  // is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  // Reserve space and create heaps
  initialize_heaps();

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_METHOD_HEAPS(it) {
      FOR_ALL_ALIVE_BLOBS(cb, *it) {
        nmethod* nm = (nmethod*)cb;
        if (!nm->is_marked_for_deoptimization() &&
            nm->check_all_dependencies()) {
          ResourceMark rm;
          tty->print_cr("Should have been marked for deoptimization:");
          changes.print();
          nm->print();
          nm->print_dependencies();
        }
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}

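// Flow note (descriptive, based on the functions in this file): marking alone
// does not deoptimize anything. A caller typically follows a successful
// mark_for_deoptimization() with make_marked_nmethods_not_entrant() (or
// make_marked_nmethods_zombies() at a safepoint; both are defined below) to
// actually retire the affected code.
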
#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_marked_for_deoptimization()) {
        // ...Already marked in the previous pass; don't count it again.
      } else if (nm->is_evol_dependent_on(dependee())) {
        ResourceMark rm;
        nm->mark_for_deoptimization();
        number_of_marked_CodeBlobs++;
      } else {
        // flush caches in case they refer to a redefined Method*
        nm->clear_inline_caches();
      }
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_dependent_on_method(dependee)) {
        ResourceMark rm;
        nm->mark_for_deoptimization();
        number_of_marked_CodeBlobs++;
      }
    }
  }

  return number_of_marked_CodeBlobs;
}

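// State-machine sketch (hedged summary of the nmethod life cycle as used
// below):
//
//   in_use --> not_entrant --> zombie --> unloaded/freed
//
// make_marked_nmethods_zombies() advances marked nmethods along this chain as
// far as is safe: a not-entrant nmethod can only become a zombie once no
// activation of it remains on any thread's stack.
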
void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_marked_for_deoptimization()) {

        // If the nmethod has already been made non-entrant and it can be converted
        // then zombie it now. Otherwise make it non-entrant and it will eventually
        // be zombied when it is no longer seen on the stack. Note that the nmethod
        // might be "entrant" and not on the stack and so could be zombied immediately
        // but we can't tell because we don't track it on stack until it becomes
        // non-entrant.

        if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
          nm->make_zombie();
        } else {
          nm->make_not_entrant();
        }
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_ALIVE_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_marked_for_deoptimization()) {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    CodeHeap* heap = *it;
    heap->verify();
    FOR_ALL_BLOBS(cb, heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);

  if (!heap->was_full()) {
    // Not yet reported for this heap, report
    heap->report_full();
    warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_heap_name(code_blob_type));
    warning("Try increasing the code heap size using -XX:%s=",
            (code_blob_type == CodeBlobType::MethodNoProfile) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");

    ResourceMark rm;
    stringStream s;
    // Dump CodeCache summary into a buffer before locking the tty
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s, true);
    }
    ttyLocker ttyl;
    tty->print(s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType(code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

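// Caller note (an assumption about surrounding code, not verified here): this
// is expected to be reached from the compile broker when allocate() returns
// NULL for a given heap, along the lines of
//
//   if (CodeCache::allocate(size, code_blob_type) == NULL) {
//     CompileBroker::handle_full_code_cache(...);  // ends up in report_codemem_full
//   }
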
//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    FOR_ALL_HEAPS(it) {
      (*it)->verify();
    }
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  FOR_ALL_HEAPS(it) {
    if (Verbose) {
      tty->print_cr("## Heap '%s' ##", (*it)->name());
    }
    FOR_ALL_BLOBS(cb, *it) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s %d", method_name, nm->comp_level());
          if (nm->is_alive()) { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive()) { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie()) { nmethodZombie++; }
        if (nm->is_unloaded()) { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          if (nm->insts_size() > maxCodeSize) {
            maxCodeSize = nm->insts_size();
          }
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  FOR_ALL_METHOD_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      nmethod* nm = (nmethod*)cb;
      if (nm->method() != NULL && nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; ++i) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %fs (%fs per check)",
                dependentCheckTime.seconds(),
                dependentCheckCount == 0 ? 0.0 : dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(it) {
      FOR_ALL_BLOBS(cb, *it) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size           += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  st->print_cr("CodeCache Summary:");
  FOR_ALL_HEAPS(it) {
    CodeHeap* heap = (*it);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    st->print_cr("Heap '%s': size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 heap->name(), total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   heap->low_boundary(),
                   heap->high(),
                   heap->high_boundary());
    }
  }

  if (detailed) {
    log_state(st);
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters());
}