/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};
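
// Example use of CodeBlob_sizes (a sketch; it mirrors what CodeCache::print()
// does near the end of this file):
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(p) { if (p->is_alive()) live.add(p); }
//   if (!live.is_empty()) live.print("live");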


// CodeCache implementation

CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;


CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob *cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  // Count the blob only once the allocation has actually succeeded.
  _number_of_blobs++;
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}
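
// Typical allocation sequence (a sketch, not a verbatim call site): the
// caller holds CodeCache_lock across allocate() and the placement
// construction of the blob, then calls commit() on the finished blob:
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* cb = CodeCache::allocate(size);
//   if (cb != NULL) {
//     // placement-construct the CodeBlob subclass into cb, then:
//     CodeCache::commit(cb);
//   }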

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))

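// Example (a sketch): counting live nmethods with the macros above.
//   assert_locked_or_safepoint(CodeCache_lock);
//   int live_nmethods = 0;
//   FOR_ALL_ALIVE_NMETHODS(nm) { live_nmethods++; }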

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
// valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}
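
// Typical lookup (a sketch): map an arbitrary code address, e.g. a frame pc,
// back to its containing blob.
//   CodeBlob* cb = CodeCache::contains(pc) ? CodeCache::find_blob(pc) : NULL;
//   nmethod*  nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;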

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* cb)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) f((nmethod*)cb);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
      cur->fix_oop_relocations();
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}
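
// The scavenge-root nmethods form a singly linked list threaded through each
// nmethod's scavenge_root_link() field. add_scavenge_root_nmethod() above
// pushes onto the head; drop_scavenge_root_nmethod() and
// prune_scavenge_root_nmethods() below unlink entries by walking the list
// with a trailing 'last' pointer.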

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;   // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT


nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected\n");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(m);
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  // For the concurrent sweeper this is called with the CodeCache_lock already
  // held by the caller.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(nm->method());
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
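
// Lifecycle of a speculatively disconnected nmethod, as implemented above:
// speculatively_disconnect() pushes it onto the _saved_nmethods list and
// clears the method's code pointer; find_and_remove_saved_code() reconnects
// it when the method is requested again; remove_saved_code() unlinks it once
// the nmethod is finally flushed.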


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


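// Called at the end of a GC: objects referenced from compiled code may have
// moved, so patch the embedded oop sites and, if a cleaning pass was
// requested, clean up the inline caches.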
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check, which caused startup
  // failures; instead, round the code cache sizes up to the page size.
  // In particular, Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize the ICache flush mechanism.
  // This service is needed by os::register_code_area.
  icache_init();

  // Give the OS a chance to register the generated code area.
  // This is used on 64-bit Windows to register Structured Exception
  // Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}
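
// codeCache_init() is expected to run exactly once during VM startup (it is
// called from init_globals() in runtime/init.cpp), before any code blob can
// be allocated.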

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // Search the class hierarchy for nmethods that are affected by the loading
  // of this class, then search the interfaces this class implements for
  // nmethods that might depend on the fact that an interface previously had
  // only one implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop d = str.klass();
      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  objArrayOop old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    methodOop old_method = (methodOop) old_methods->obj_at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // Flush caches in case they refer to a redefined methodOop.
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}
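
// Typical deoptimization flow (a sketch): marking happens under the
// CodeCache_lock, and the marked nmethods are invalidated at a safepoint,
// e.g. via the VM_Deoptimize VM operation:
//   int marked = CodeCache::mark_for_deoptimization(dependee);
//   if (marked > 0) {
//     VM_Deoptimize op;
//     VMThread::execute(&op);  // deoptimizes dependents, zombies marked nmethods
//   }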

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive()) { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive()) { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie()) { nmethodZombie++; }
      if (nm->is_unloaded()) { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
}

void CodeCache::print() {
  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %fs total, %fs per check",
                dependentCheckTime.seconds(),
                dependentCheckCount == 0 ? 0.0
                    : dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (Verbose) {
    // Print the oop_map usage.
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }
}

#endif // PRODUCT