1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeBlob.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "compiler/compileBroker.hpp"
  34 #include "gc_implementation/shared/markSweep.hpp"
  35 #include "interpreter/abstractInterpreter.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/gcLocker.hpp"
  38 #include "memory/iterator.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "oops/method.hpp"
  41 #include "oops/objArrayOop.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/arguments.hpp"
  45 #include "runtime/icache.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/mutexLocker.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "trace/tracing.hpp"
  50 #include "utilities/xmlstream.hpp"
  51 
  52 // Helper class for printing in CodeCache
  53 
  54 class CodeBlob_sizes {
  55  private:
  56   int count;
  57   int total_size;
  58   int header_size;
  59   int code_size;
  60   int stub_size;
  61   int relocation_size;
  62   int scopes_oop_size;
  63   int scopes_metadata_size;
  64   int scopes_data_size;
  65   int scopes_pcs_size;
  66 
  67  public:
  68   CodeBlob_sizes() {
  69     count                = 0;
  70     total_size           = 0;
  71     header_size          = 0;
  72     code_size            = 0;
  73     stub_size            = 0;
  74     relocation_size      = 0;
  75     scopes_oop_size      = 0;
  76     scopes_metadata_size = 0;
  77     scopes_data_size     = 0;
  78     scopes_pcs_size      = 0;
  79   }
  80 
  81   int get_total_size()  { return total_size; }
  82   int get_total_count() { return count; }
  83   bool is_empty()       { return count == 0; }
  84 
  85   void print(const char* title) {
  86     const int len = strnlen(title, 25);
  87     if (count == 0) {
  88       tty->print_cr("%s%*d#", title, 25-len, count);
  89     } else {
  90       tty->print_cr("%s%*d#%7d kB\t(hdr %2u%%, loc %2u%%, code %2u%%, stub %2u%%, [oops %2u%%, data %2u%%, pcs %2u%%])",
  91                     title,
  92                     25-len,
  93                     count,
  94                     total_size / K,
  95                     header_size             * 100 / total_size,
  96                     relocation_size         * 100 / total_size,
  97                     code_size               * 100 / total_size,
  98                     stub_size               * 100 / total_size,
  99                     scopes_oop_size         * 100 / total_size,
 100                     scopes_metadata_size    * 100 / total_size,
 101                     scopes_data_size        * 100 / total_size,
 102                     scopes_pcs_size         * 100 / total_size);
 103         }
 104   }
 105 
 106   void add(CodeBlob* cb) {
 107     count++;
 108     total_size       += cb->size();
 109     header_size      += cb->header_size();
 110     relocation_size  += cb->relocation_size();
 111     if (cb->is_nmethod()) {
 112       nmethod* nm = cb->as_nmethod_or_null();
 113       code_size        += nm->insts_size();
 114       stub_size        += nm->stub_size();
 115 
 116       scopes_oop_size  += nm->oops_size();
 117       scopes_metadata_size  += nm->metadata_size();
 118       scopes_data_size += nm->scopes_data_size();
 119       scopes_pcs_size  += nm->scopes_pcs_size();
 120     } else {
 121       code_size        += cb->code_size();
 122     }
 123   }
 124 };
 125 
 126 // CodeCache implementation
 127 
// Backing storage for all generated code; reserved/committed in initialize().
CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
// Head of the singly-linked list of nmethods that may contain non-perm oops.
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
// Head of the singly-linked list of speculatively disconnected nmethods.
nmethod* CodeCache::_saved_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;
// High-water mark of code cache usage in bytes, updated on every allocation.
static size_t _max_code_cache_used = 0;

// Code cache printing options (set from -XX:PrintCodeCacheDetails=...).
static bool _print_details        = false;
static bool _print_trace          = false;
static bool _print_content        = false;
static bool _print_oop_map_usage  = false;
static bool _print_dep_check_time = false;

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT
 152 
 153 
 154 CodeBlob* CodeCache::first() {
 155   assert_locked_or_safepoint(CodeCache_lock);
 156   return (CodeBlob*)_heap->first();
 157 }
 158 
 159 
 160 CodeBlob* CodeCache::next(CodeBlob* cb) {
 161   assert_locked_or_safepoint(CodeCache_lock);
 162   return (CodeBlob*)_heap->next(cb);
 163 }
 164 
 165 
 166 CodeBlob* CodeCache::alive(CodeBlob *cb) {
 167   assert_locked_or_safepoint(CodeCache_lock);
 168   while (cb != NULL && !cb->is_alive()) cb = next(cb);
 169   return cb;
 170 }
 171 
 172 
 173 nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
 174   assert_locked_or_safepoint(CodeCache_lock);
 175   while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
 176   return (nmethod*)cb;
 177 }
 178 
 179 nmethod* CodeCache::first_nmethod() {
 180   assert_locked_or_safepoint(CodeCache_lock);
 181   CodeBlob* cb = first();
 182   while (cb != NULL && !cb->is_nmethod()) {
 183     cb = next(cb);
 184   }
 185   return (nmethod*)cb;
 186 }
 187 
 188 nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
 189   assert_locked_or_safepoint(CodeCache_lock);
 190   cb = next(cb);
 191   while (cb != NULL && !cb->is_nmethod()) {
 192     cb = next(cb);
 193   }
 194   return (nmethod*)cb;
 195 }
 196 
 197 CodeBlob* CodeCache::allocate(int size, bool is_critical) {
 198   // Do not seize the CodeCache lock here--if the caller has not
 199   // already done so, we are going to lose bigtime, since the code
 200   // cache will contain a garbage CodeBlob until the caller can
 201   // run the constructor for the CodeBlob subclass he is busy
 202   // instantiating.
 203   guarantee(size >= 0, "allocation request must be reasonable");
 204   assert_locked_or_safepoint(CodeCache_lock);
 205   CodeBlob* cb = NULL;
 206   _number_of_blobs++;
 207   while (true) {
 208     cb = (CodeBlob*)_heap->allocate(size, is_critical);
 209     if (cb != NULL) break;
 210     if (!_heap->expand_by(CodeCacheExpansionSize)) {
 211       // Expansion failed
 212       return NULL;
 213     }
 214     if (PrintCodeCacheExtension) {
 215       ResourceMark rm;
 216       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
 217                     (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
 218                     (address)_heap->high() - (address)_heap->low_boundary());
 219     }
 220   }
 221   _max_code_cache_used = MAX2(_max_code_cache_used, ((address)_heap->high_boundary() -
 222                           (address)_heap->low_boundary()) - unallocated_capacity());
 223   verify_if_often();
 224   print_trace("allocation", cb, size);
 225   return cb;
 226 }
 227 
// Return a CodeBlob's storage to the code heap and update the per-kind
// counters. Mirrors the bookkeeping done in commit().
void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}
 249 
 250 
// Finish registration of a freshly constructed blob: bump the per-kind
// counters (the blob count itself was handled in allocate()) and flush
// the instruction cache for the new code.
void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}
 267 
 268 
// Flushing the whole code cache is intentionally not supported.
void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}
 273 
 274 
// Iteration over CodeBlobs

// Iterate over every blob in the heap, including dead/zombie blobs.
#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
// Iterate over live blobs only (see alive()).
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
// Iterate over live nmethods only (see alive_nmethod()).
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
 280 
 281 
// Whether p points into the reserved code cache address range.
bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}
 286 
 287 
// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potientially look up non_entrant methods
  // Zombie blobs are only tolerated when locked by the VM or when an error
  // report is already in progress; anything else is an unsafe access.
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}
 298 
 299 nmethod* CodeCache::find_nmethod(void* start) {
 300   CodeBlob *cb = find_blob(start);
 301   assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
 302   return (nmethod*)cb;
 303 }
 304 
 305 
 306 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 307   assert_locked_or_safepoint(CodeCache_lock);
 308   FOR_ALL_BLOBS(p) {
 309     f(p);
 310   }
 311 }
 312 
 313 
 314 void CodeCache::nmethods_do(void f(nmethod* nm)) {
 315   assert_locked_or_safepoint(CodeCache_lock);
 316   FOR_ALL_BLOBS(nm) {
 317     if (nm->is_nmethod()) f((nmethod*)nm);
 318   }
 319 }
 320 
 321 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
 322   assert_locked_or_safepoint(CodeCache_lock);
 323   FOR_ALL_ALIVE_NMETHODS(nm) {
 324     f(nm);
 325   }
 326 }
 327 
// Allocation alignment of the underlying code heap, in bytes.
int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}
 331 
 332 
// Offset applied to allocations so that blob contents end up aligned.
int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}
 336 
 337 
 338 // Mark nmethods for unloading if they contain otherwise unreachable
 339 // oops.
 340 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
 341   assert_locked_or_safepoint(CodeCache_lock);
 342   FOR_ALL_ALIVE_NMETHODS(nm) {
 343     nm->do_unloading(is_alive, unloading_occurred);
 344   }
 345 }
 346 
// Apply the closure to every live blob; in debug builds additionally
// verify each nmethod's scavenge-root oops as we pass over it.
void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}
 358 
// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  // Debug-only: set a mark on every listed nmethod so that stray list
  // membership can be detected by verify_perm_nmethods() below.
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    // Zombie or unloaded nmethods stay on the list here but are not
    // presented to the closure; they are pruned elsewhere.
    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
 384 
// Push nm onto the front of the singly-linked scavenge-root list.
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}
 392 
// Unlink nm from the scavenge-root list. nm must be on the list.
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  // Standard singly-linked-list unlink: track the predecessor so the
  // link can be spliced around the node being removed.
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}
 413 
// Drop from the scavenge-root list every nmethod that is dead or that no
// longer contains scavengable oops. Called from gc_epilogue().
void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
 445 
 446 #ifndef PRODUCT
// Debug-only: run f over nmethods that are NOT on the scavenge-root list,
// verifying list integrity via the mark/clear protocol along the way.
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  // verify_perm_nmethods() calls f on the nmethods not on the list.
  verify_perm_nmethods(f);
}
 456 
 457 // Temporarily mark nmethods that are claimed to be on the non-perm list.
 458 void CodeCache::mark_scavenge_root_nmethods() {
 459   FOR_ALL_ALIVE_BLOBS(cb) {
 460     if (cb->is_nmethod()) {
 461       nmethod *nm = (nmethod*)cb;
 462       assert(nm->scavenge_root_not_marked(), "clean state");
 463       if (nm->on_scavenge_root_list())
 464         nm->set_scavenge_root_marked();
 465     }
 466   }
 467 }
 468 
// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      // A leftover mark here means an nmethod was marked but never walked
      // as part of the scavenge-root list -- i.e. a stray mark.
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;   // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
 486 #endif //PRODUCT
 487 
/**
 * Remove and return nmethod from the saved code list in order to reanimate it.
 * Returns NULL if no in-use saved nmethod for m exists.
 */
nmethod* CodeCache::reanimate_saved_code(Method* m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  // Linear search of the singly-linked saved list, keeping a predecessor
  // pointer so the match can be unlinked in place.
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      // Reverse the state changes made by speculatively_disconnect().
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(m);
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}
 522 
/**
 * Remove nmethod from the saved code list in order to discard it permanently.
 * nm must be on the list (ShouldNotReachHere otherwise).
 */
void CodeCache::remove_saved_code(nmethod* nm) {
  // For conc swpr this will be called with CodeCache_lock taken by caller
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  // Unlink nm from the singly-linked saved list.
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}
 552 
// Detach a live nmethod from its Method and park it on the saved list.
// It can later be reanimated (reanimate_saved_code) or discarded
// (remove_saved_code).
void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  // Push onto the front of the saved list.
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(nm->method());
    xtty->stamp();
    xtty->end_elem();
  }
  // Drop the Method* -> nmethod link and flag the nmethod as parked.
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
 571 
 572 
// Sanity hook run at the start of a GC cycle.
void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}
 576 
 577 
// Post-GC cleanup: fix oop relocations in live nmethods, optionally clean
// inline caches, and prune the scavenge-root list. In debug builds also
// cross-checks CompiledICHolder accounting against the relocations found.
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      // GC may have moved oops; patch the embedded oop relocations.
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      // Count every icholder-carrying virtual call site in this nmethod.
      RelocIterator iter((nmethod*)cb);
      while(iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  // Every live icholder must be accounted for: installed in code (count),
  // pending in the IC buffer, or allocated but not yet claimed.
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}
 621 
 622 
 623 void CodeCache::verify_oops() {
 624   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 625   VerifyOopClosure voc;
 626   FOR_ALL_ALIVE_BLOBS(cb) {
 627     if (cb->is_nmethod()) {
 628       nmethod *nm = (nmethod*)cb;
 629       nm->oops_do(&voc);
 630       nm->verify_oop_relocations();
 631     }
 632   }
 633 }
 634 
 635 
// Lowest address of the reserved code cache range.
address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}
 640 
 641 
// Current top of the committed code cache range.
address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}
 646 
 647 /**
 648  * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 649  * is free, reverse_free_ratio() returns 4.
 650  */
 651 double CodeCache::reverse_free_ratio() {
 652   double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
 653   double max_capacity = (double)CodeCache::max_capacity();
 654   return max_capacity / unallocated_capacity;
 655 }
 656 
 657 void icache_init();
 658 
// One-time VM startup: validate segment-size invariants, round sizing
// flags to the page size, reserve the code heap, and hook up dependent
// services (memory pool, ICache, OS code-area registration).
void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure, instead, round
  // the code cache to the page size.  In particular, Solaris is moving to a larger
  // default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}
 686 
 687 
// Free-function entry point used by the VM init sequence.
void codeCache_init() {
  CodeCache::initialize();
}
 691 
 692 //------------------------------------------------------------------------------------------------
 693 
// Accessor for the dependency-bearing nmethod counter (see commit()/free()).
int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}
 697 
 698 void CodeCache::clear_inline_caches() {
 699   assert_locked_or_safepoint(CodeCache_lock);
 700   FOR_ALL_ALIVE_NMETHODS(nm) {
 701     nm->clear_inline_caches();
 702   }
 703 }
 704 
 705 
// Mark for deoptimization every nmethod whose recorded dependencies are
// invalidated by the given class-hierarchy change. Returns the number of
// nmethods marked.
int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent of the fact that an interface only had one
  // implementor.

  // No_Safepoint_Verifier: the context stream must not be interrupted by a
  // safepoint while we walk the affected klasses.
  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    // Re-check every live unmarked nmethod; any failure here means the
    // marking pass above missed a dependency.
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}
 750 
 751 
 752 #ifdef HOTSWAP
// Mark for deoptimization all code affected by class redefinition of
// 'dependee' (JVMTI RedefineClasses). Returns the number marked.
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  // Second pass: catch nmethods that depend on the redefined class
  // (e.g. via inlining) without compiling one of its methods directly.
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else  {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
 784 #endif // HOTSWAP
 785 
 786 
 787 // Deoptimize all methods
 788 void CodeCache::mark_all_nmethods_for_deoptimization() {
 789   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 790   FOR_ALL_ALIVE_NMETHODS(nm) {
 791     nm->mark_for_deoptimization();
 792   }
 793 }
 794 
 795 
 796 int CodeCache::mark_for_deoptimization(Method* dependee) {
 797   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 798   int number_of_marked_CodeBlobs = 0;
 799 
 800   FOR_ALL_ALIVE_NMETHODS(nm) {
 801     if (nm->is_dependent_on_method(dependee)) {
 802       ResourceMark rm;
 803       nm->mark_for_deoptimization();
 804       number_of_marked_CodeBlobs++;
 805     }
 806   }
 807 
 808   return number_of_marked_CodeBlobs;
 809 }
 810 
// Transition every nmethod marked for deoptimization toward zombie state:
// straight to zombie when safe, otherwise via not-entrant first.
void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}
 831 
 832 void CodeCache::make_marked_nmethods_not_entrant() {
 833   assert_locked_or_safepoint(CodeCache_lock);
 834   FOR_ALL_ALIVE_NMETHODS(nm) {
 835     if (nm->is_marked_for_deoptimization()) {
 836       nm->make_not_entrant();
 837     }
 838   }
 839 }
 840 
 841 void CodeCache::verify() {
 842   _heap->verify();
 843   FOR_ALL_ALIVE_BLOBS(p) {
 844     p->verify();
 845   }
 846 }
 847 
// Record a code-cache-full condition and emit a CodeCacheFull trace event
// with a snapshot of the cache geometry and counters.
void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    // NOTE: "commited" spelling matches the event schema field name.
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}
 863 
// Parse the -XX:PrintCodeCacheDetails= option value and set the matching
// printing flags. "all" enables everything; "off" is a no-op; anything
// else is a startup error.
void CodeCache::init_printing_options(const char* str) {
  if (strcmp("on", str) == 0) {
    _print_details = true;
  } else if (strcmp("trace", str) == 0) {
    _print_trace = true;
  } else if (strcmp("content", str) == 0) {
    _print_content = true;
  } else if(strcmp("oop_map", str) == 0) {
    _print_oop_map_usage = true;
  } else if(strcmp("dep_check_time", str) == 0) {
    _print_dep_check_time = true;
  } else if (strcmp("all", str) == 0) {
    _print_details = true;
    _print_trace = true;
    _print_content = true;
    _print_oop_map_usage = true;
    _print_dep_check_time = true;
  } else if (strcmp("off", str) != 0) {
    vm_exit_during_initialization("Syntax error, expecting -XX:PrintCodeCacheDetails=[off|on|all|trace|content|oop_map|dep_check_time]", NULL);
  }
}
 885 
void CodeCache::print() {
  // Always print the one-line summary; the detailed sections below are
  // compiled out of PRODUCT builds and individually gated by the
  // -XX:PrintCodeCacheDetails flags (see init_printing_options).
  print_summary(tty);

#ifndef PRODUCT
  if (_print_details) {
    print_details();
  }
  if (_print_content) {
    print_content();
  }
  // Oop map statistics are also shown unconditionally in WizardMode.
  if (WizardMode || _print_oop_map_usage) {
    print_oop_map_usage();
  }
  if (_print_dep_check_time) {
    print_dependency_checking_time();
  }
#endif
}
 904 
 905 void CodeCache::print_summary(outputStream* st, bool detailed) {
 906   size_t total = (_heap->high_boundary() - _heap->low_boundary());
 907   st->print_cr("CodeCache: size=" SIZE_FORMAT "kB used=" SIZE_FORMAT
 908                "kB max_used=" SIZE_FORMAT "kB free=" SIZE_FORMAT "kB",
 909                total/K, (total - unallocated_capacity())/K,
 910                _max_code_cache_used/K, unallocated_capacity()/K);
 911 
 912 
 913   if (detailed) {
 914     int interpreter_size = AbstractInterpreter::code()->total_space() / K;
 915     CodeBlob_sizes live_nm;
 916     CodeBlob_sizes dead_nm;
 917     CodeBlob_sizes stubs;
 918     CodeBlob_sizes adapters;
 919 
 920     FOR_ALL_BLOBS(p) {
 921       // live or not-entrant methods
 922       if (p->is_nmethod()) {
 923         if (p->is_alive()) {
 924           live_nm.add(p);
 925         } else {
 926           dead_nm.add(p);
 927         }
 928       } else {
 929         if (p->is_adapter_blob()) {
 930           adapters.add(p);
 931         } else {
 932           stubs.add(p);
 933         }
 934       }
 935     }
 936     st->print_cr(" Interpreter=%dkB live_nmethods=%u(%ukB) dead_nmethods=%u(%ukB) stubs=%u(%ukB) adapters=%u(%ukB)",
 937                        interpreter_size, live_nm.get_total_count(), live_nm.get_total_size() / K,
 938                        dead_nm.get_total_count(), dead_nm.get_total_size() / K,
 939                        stubs.get_total_count(), stubs.get_total_size() / K,
 940                        adapters.get_total_count(), adapters.get_total_size() / K);
 941 
 942     st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
 943                  _heap->low_boundary(),
 944                  _heap->high(),
 945                  _heap->high_boundary());
 946 
 947     st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
 948                  "enabled" : Arguments::mode() == Arguments::_int ?
 949                  "disabled (interpreter mode)" :
 950                  "disabled (not enough contiguous free space left)");
 951   }
 952 }
 953 
// Emit code cache statistics as XML-style attributes (for the compilation log).
void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}
 960 
 961 
 962 //------------------------------------------------------------------------------------------------
 963 // Non-product version
 964 #ifndef PRODUCT
 965 
 966 void CodeCache::verify_if_often() {
 967   if (VerifyCodeCacheOften) {
 968     _heap->verify();
 969   }
 970 }
 971 
 972 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
 973   if (_print_trace) {
 974     ResourceMark rm;
 975     if (size == 0)  {
 976       size = cb->size();
 977     }
 978     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: %dB", event, cb, size);
 979   }
 980 }
 981 
 982 void CodeCache::print_details() {
 983   ResourceMark rm;
 984 
 985   CodeBlob_sizes runtime_stubs;
 986   CodeBlob_sizes adapters;
 987   CodeBlob_sizes deoptimization_stubs;
 988   CodeBlob_sizes uncommon_trap_stubs;
 989   CodeBlob_sizes buffer_blobs;
 990   CodeBlob_sizes in_use;
 991   CodeBlob_sizes not_entrant;
 992   CodeBlob_sizes zombie;
 993   CodeBlob_sizes unloaded;
 994   CodeBlob_sizes java_methods;
 995   CodeBlob_sizes native_methods;
 996   CodeBlob_sizes other_entries;
 997   CodeBlob_sizes tiers[CompLevel_full_optimization + 1];
 998 
 999   int total_entries = 0;
1000   int max_code_size = 0;
1001 
1002   FOR_ALL_BLOBS(cb) {
1003     total_entries++;
1004     if (cb->is_nmethod()) {
1005       nmethod* nm = (nmethod*)cb;
1006 
1007       if (nm->is_in_use()) {
1008         in_use.add(nm);
1009       } else if (nm->is_not_entrant()) {
1010         not_entrant.add(nm);
1011       } else if (nm->is_zombie()) {
1012         zombie.add(nm);
1013       } else if (nm->is_unloaded()) {
1014         unloaded.add(nm);
1015       }
1016 
1017       if(nm->is_native_method()) {
1018         native_methods.add(nm);
1019       }
1020 
1021       // Native methods are Tier 0
1022       tiers[nm->comp_level()].add(nm);
1023 
1024       if (nm->method() != NULL && nm->is_java_method()) {
1025         java_methods.add(nm);
1026         if (nm->insts_size() > max_code_size) {
1027           max_code_size = nm->insts_size();
1028         }
1029       }
1030     } else if (cb->is_runtime_stub()) {
1031       runtime_stubs.add(cb);
1032     } else if (cb->is_deoptimization_stub()) {
1033       deoptimization_stubs.add(cb);
1034     } else if (cb->is_uncommon_trap_stub()) {
1035       uncommon_trap_stubs.add(cb);
1036     } else if (cb->is_adapter_blob()) {
1037       adapters.add(cb);
1038     } else if (cb->is_buffer_blob()) {
1039       buffer_blobs.add(cb);
1040     } else {
1041       other_entries.add(cb);
1042     }
1043   }
1044 
1045   tty->print_cr("\nCode cache entries: (total of #%d)", total_entries);
1046   int total_nm_count = tiers[0].get_total_count() + tiers[1].get_total_count() + tiers[2].get_total_count() +
1047                                tiers[3].get_total_count() + tiers[4].get_total_count();
1048   int total_nm_size =  tiers[0].get_total_size() + tiers[1].get_total_size() + tiers[2].get_total_size() +
1049                                tiers[3].get_total_size() + tiers[4].get_total_size();
1050   tty->print_cr("nmethods:\t%6d# 7%d kB",  total_nm_count, total_nm_size / K);
1051   java_methods.print(" Java");
1052   tiers[1].print("  Tier 1");
1053   tiers[2].print("  Tier 2");
1054   tiers[3].print("  Tier 3");
1055   tiers[4].print("  Tier 4");
1056   native_methods.print(" Native");
1057 
1058   runtime_stubs.print("runtime stubs");
1059   adapters.print("adapters");
1060   buffer_blobs.print("buffer blobs");
1061   deoptimization_stubs.print("deoptimization stubs");
1062   uncommon_trap_stubs.print("uncommon trap stubs");
1063   other_entries.print("others");
1064 
1065   tty->print_cr("\nnmethod state distribution");
1066   in_use.print(" in-use");
1067   not_entrant.print(" not-entrant");
1068   zombie.print(" zombie");
1069   unloaded.print(" unloaded");
1070 }
1071 
1072 void CodeCache::print_content() {
1073   const int bucketSize = 512;
1074   const int bucketLimit = _max_code_cache_used / bucketSize + 1;
1075   int* buckets = NEW_C_HEAP_ARRAY_RETURN_NULL(int, bucketLimit, mtInternal);
1076   if (buckets == NULL) {
1077     return;
1078   }
1079   memset(buckets, 0, sizeof(int) * bucketLimit);
1080 
1081   FOR_ALL_BLOBS(cb) {
1082     if (cb->is_nmethod()) {
1083       nmethod* nm = (nmethod*)cb;
1084       if (nm->method() != NULL) {
1085         ResourceMark rm;
1086         const char *method_name = nm->method()->name_and_sig_as_C_string();
1087         if (nm->is_in_use()) {
1088           tty->print("in-use:      ");
1089             } else if (nm->is_not_entrant()) {
1090           tty->print("not-entrant: ");
1091         } else if (nm->is_zombie()) {
1092           tty->print("zombie:      ");
1093             }
1094             tty->print_cr("%s", method_name);
1095             if(nm->is_java_method()) {
1096               buckets[nm->insts_size() / bucketSize]++;
1097             }
1098           }
1099     }
1100   }
1101 
1102   tty->print_cr("\nnmethod size distribution (non-zombie java)");
1103 
1104   for (int i = 0; i < bucketLimit; i++) {
1105     if (buckets[i] != 0) {
1106       tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1107       tty->fill_to(40);
1108       tty->print_cr("%d", buckets[i]);
1109     }
1110   }
1111   FREE_C_HEAP_ARRAY(int, buckets, mtInternal);
1112 }
1113 
1114 void CodeCache::print_oop_map_usage() {
1115   int code_size = 0;
1116   int number_of_blobs = 0;
1117   int number_of_oop_maps = 0;
1118   int map_size = 0;
1119   FOR_ALL_BLOBS(p) {
1120     if (p->is_alive()) {
1121       number_of_blobs++;
1122       code_size += p->code_size();
1123       OopMapSet* set = p->oop_maps();
1124       if (set != NULL) {
1125         number_of_oop_maps += set->size();
1126         map_size           += set->heap_size();
1127       }
1128     }
1129   }
1130   tty->print_cr("OopMaps");
1131   tty->print_cr("  #blobs    = %d", number_of_blobs);
1132   tty->print_cr("  code size = %d", code_size);
1133   tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1134   tty->print_cr("  map size  = %d", map_size);
1135 }
1136 
// Report the accumulated time spent in, and the number of, nmethod
// dependency checks (counters maintained elsewhere in this file).
void CodeCache::print_dependency_checking_time() {
  tty->print_cr("total nmethod dependency checking time: %f[s]", dependentCheckTime.seconds());
  tty->print_cr("total number of dependency checks     : %d", dependentCheckCount);
}
1141 #endif // !PRODUCT