/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "interpreter/abstractInterpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  size_t count;
  size_t total_size;
  size_t header_size;
  size_t code_size;
  size_t stub_size;
  size_t relocation_size;
  size_t scopes_oop_size;
  size_t scopes_metadata_size;
  size_t scopes_data_size;
  size_t scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  size_t get_total_size()  { return total_size; }
  size_t get_total_count() { return count; }
  bool is_empty()   { return count == 0; }

  void print(const char* title) {
    if (count == 0) {
      tty->print_cr("  #" SIZE_FORMAT " %s", count, title);
    } else {
      tty->print_cr("  #" SIZE_FORMAT " %s = " SIZE_FORMAT "kB (hdr " SIZE_FORMAT "%%, loc " SIZE_FORMAT "%%, code " SIZE_FORMAT "%%, stub " SIZE_FORMAT "%%, [oops " SIZE_FORMAT "%%, metadata " SIZE_FORMAT "%%, data " SIZE_FORMAT "%%, pcs " SIZE_FORMAT "%%])",
                    count,
                    title,
                    total_size / K,
                    header_size             * 100 / total_size,
                    relocation_size         * 100 / total_size,
                    code_size               * 100 / total_size,
                    stub_size               * 100 / total_size,
                    scopes_oop_size         * 100 / total_size,
                    scopes_metadata_size    * 100 / total_size,
                    scopes_data_size        * 100 / total_size,
                    scopes_pcs_size         * 100 / total_size);
    }
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};
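
// Illustrative use of this helper (a sketch only, not live code;
// print_details() below does the real accumulation, and FOR_ALL_BLOBS is
// defined further down):
//
//   CodeBlob_sizes sizes;
//   FOR_ALL_BLOBS(cb) { sizes.add(cb); }
//   if (!sizes.is_empty()) sizes.print("all blobs");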

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
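
// Illustrative use of the iteration macros (a sketch, not live code; it
// assumes it runs inside a CodeCache member, since first()/next() are
// CodeCache statics, and that the caller holds the CodeCache_lock or is at
// a safepoint, like the walkers below):
//
//   FOR_ALL_ALIVE_NMETHODS(nm) {
//     if (nm->is_marked_for_deoptimization()) nm->make_not_entrant();
//   }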


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
// valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}
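
// Typical lookup pattern (a sketch): map an arbitrary code address, e.g. a
// frame's pc, back to its containing blob, then narrow to an nmethod:
//
//   address pc = ...;                        // some address inside the cache
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;
//     // ... inspect nm ...
//   }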


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;   // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

/**
 * Remove and return nmethod from the saved code list in order to reanimate it.
 */
nmethod* CodeCache::reanimate_saved_code(Method* m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(m);
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

/**
 * Remove nmethod from the saved code list in order to discard it permanently.
 */
void CodeCache::remove_saved_code(nmethod* nm) {
  // For the concurrent sweeper, this is called with the CodeCache_lock already held by the caller.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(nm->method());
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}
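
// Worked example (hypothetical numbers): with max_capacity = 48MB and 12MB
// unallocated (treating CodeCacheMinimumFreeSpace as negligible here),
// reverse_free_ratio() returns 48/12 = 4; at 6MB free it returns 8. The
// ratio grows as free space shrinks, so compilation policy code can use it
// to back off compilation as the cache fills.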

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure; instead,
  // round the code cache sizes up to the page size.  In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.
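      //
      // A sketch of the assumed overall state flow these checks implement:
      //   in-use --> not-entrant --> zombie --> flushed (freed)
      // (unloaded nmethods take a separate, GC-driven path out of the cache).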

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCacheAllocation) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: %dB", event, (intptr_t)cb, size);
  }
}

void CodeCache::print_details() {
  size_t total_entries = 0;
  size_t max_code_size = 0;

  CodeBlob_sizes runtime_stubs;
  CodeBlob_sizes adapters;
  CodeBlob_sizes deoptimization_stubs;
  CodeBlob_sizes uncommon_trap_stubs;
  CodeBlob_sizes buffer_blobs;
  CodeBlob_sizes in_use;
  CodeBlob_sizes not_entrant;
  CodeBlob_sizes zombie;
  CodeBlob_sizes unloaded;
  CodeBlob_sizes java_methods;
  CodeBlob_sizes native_methods;
  CodeBlob_sizes other_entries;
  CodeBlob_sizes tiers[CompLevel_full_optimization + 1];

  ResourceMark rm;

  FOR_ALL_BLOBS(cb) {
    total_entries++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_in_use()) {
          tty->print_cr(" in-use");
        } else if (nm->is_not_entrant()) {
          tty->print_cr(" not-entrant");
        } else if (nm->is_zombie()) {
          tty->print_cr(" zombie");
        }
      }

      if (nm->is_in_use()) {
        in_use.add(nm);
      } else if (nm->is_not_entrant()) {
        not_entrant.add(nm);
      } else if (nm->is_zombie()) {
        zombie.add(nm);
      } else if (nm->is_unloaded()) {
        unloaded.add(nm);
      }

      if (nm->is_native_method()) {
        native_methods.add(nm);
      }

      // Native methods are Tier 0
      tiers[nm->comp_level()].add(nm);

      if (nm->method() != NULL && nm->is_java_method()) {
        java_methods.add(nm);
        if ((size_t)nm->insts_size() > max_code_size) {
          max_code_size = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtime_stubs.add(cb);
    } else if (cb->is_deoptimization_stub()) {
      deoptimization_stubs.add(cb);
    } else if (cb->is_uncommon_trap_stub()) {
      uncommon_trap_stubs.add(cb);
    } else if (cb->is_adapter_blob()) {
      adapters.add(cb);
    } else if (cb->is_buffer_blob()) {
      buffer_blobs.add(cb);
    } else {
      other_entries.add(cb);
    }
  }

  tty->print_cr("\nCode cache entries: (total of #" SIZE_FORMAT ")", total_entries);
  size_t total_nm_count = tiers[0].get_total_count() + tiers[1].get_total_count() + tiers[2].get_total_count() +
                          tiers[3].get_total_count() + tiers[4].get_total_count();
  size_t total_nm_size  = tiers[0].get_total_size() + tiers[1].get_total_size() + tiers[2].get_total_size() +
                          tiers[3].get_total_size() + tiers[4].get_total_size();
  tty->print_cr("nmethods:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",  total_nm_count, total_nm_size / K);
  tty->print_cr("  Java:\t\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",  java_methods.get_total_count(), java_methods.get_total_size() / K);
  tty->print_cr("   Tier 1:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB", tiers[1].get_total_count(), tiers[1].get_total_size() / K);
  tty->print_cr("   Tier 2:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB", tiers[2].get_total_count(), tiers[2].get_total_size() / K);
  tty->print_cr("   Tier 3:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB", tiers[3].get_total_count(), tiers[3].get_total_size() / K);
  tty->print_cr("   Tier 4:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB", tiers[4].get_total_count(), tiers[4].get_total_size() / K);
  tty->print_cr("  Native:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",  native_methods.get_total_count(), native_methods.get_total_size() / K);

  tty->print_cr("runtime-stubs:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",  runtime_stubs.get_total_count(), runtime_stubs.get_total_size() / K);
  tty->print_cr("adapters:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",       adapters.get_total_count(), adapters.get_total_size() / K);
  tty->print_cr("buffer blobs:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",   buffer_blobs.get_total_count(), buffer_blobs.get_total_size() / K);
  tty->print_cr("deopt-stubs:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",    deoptimization_stubs.get_total_count(), deoptimization_stubs.get_total_size() / K);
  tty->print_cr("uncommon-traps:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB", uncommon_trap_stubs.get_total_count(), uncommon_trap_stubs.get_total_size() / K);
  tty->print_cr("others:\t\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB\n",     other_entries.get_total_count(), other_entries.get_total_size() / K);

  tty->print_cr("nmethod state distribution");
  tty->print_cr("  in-use:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",       in_use.get_total_count(), in_use.get_total_size() / K);
  tty->print_cr("  not-entrant:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",  not_entrant.get_total_count(), not_entrant.get_total_size() / K);
  tty->print_cr("  zombie:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",       zombie.get_total_count(), zombie.get_total_size() / K);
  tty->print_cr("  unloaded:\t#" SIZE_FORMAT "\t" SIZE_FORMAT "kB",     unloaded.get_total_count(), unloaded.get_total_size() / K);

  if (Verbose) {
    tty->print_cr("\nnmethod size distribution (non-zombie java)");

    int bucketSize = 512;
    int bucketLimit = (int)(max_code_size / bucketSize) + 1;
    int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
    memset(buckets, 0, sizeof(int) * bucketLimit);

    FOR_ALL_BLOBS(cb) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        if (nm->is_java_method()) {
          buckets[nm->insts_size() / bucketSize]++;
        }
      }
    }

    for (int i = 0; i < bucketLimit; i++) {
      if (buckets[i] != 0) {
        tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
        tty->fill_to(40);
        tty->print_cr("%d", buckets[i]);
      }
    }

    FREE_C_HEAP_ARRAY(int, buckets, mtCode);

#ifndef PRODUCT
    tty->print_cr("nmethod dependency checking time %fs total, %fs per check",
                  dependentCheckTime.seconds(),
                  dependentCheckCount == 0 ? 0.0 : dependentCheckTime.seconds() / dependentCheckCount);
#endif
  }
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "kB used=" SIZE_FORMAT
               "kB max_used=" SIZE_FORMAT "kB free=" SIZE_FORMAT "kB",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    size_t interpreter_size = AbstractInterpreter::code()->total_space() / K;
    CodeBlob_sizes live_nm;
    CodeBlob_sizes dead_nm;
    CodeBlob_sizes stubs;
    CodeBlob_sizes adapters;
    size_t total_size = 0;

    FOR_ALL_BLOBS(p) {
      total_size += p->size();
      // live or not-entrant methods
      if (p->is_nmethod()) {
        if (p->is_alive()) {
          live_nm.add(p);
        } else {
          dead_nm.add(p);
        }
      } else {
        if (p->is_adapter_blob()) {
          adapters.add(p);
        } else {
          stubs.add(p);
        }
      }
    }
    st->print_cr(" Interpreter=" SIZE_FORMAT "kB live_nmethods=" SIZE_FORMAT "(" SIZE_FORMAT "kB) dead_nmethods=" SIZE_FORMAT "(" SIZE_FORMAT "kB) stubs=" SIZE_FORMAT "(" SIZE_FORMAT "kB) adapters=" SIZE_FORMAT "(" SIZE_FORMAT "kB)",
                 interpreter_size, live_nm.get_total_count(), live_nm.get_total_size() / K,
                 dead_nm.get_total_count(), dead_nm.get_total_size() / K,
                 stubs.get_total_count(), stubs.get_total_size() / K,
                 adapters.get_total_count(), adapters.get_total_size() / K);

    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 (intptr_t)_heap->low_boundary(),
                 (intptr_t)_heap->high(),
                 (intptr_t)_heap->high_boundary());

    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}
1098