/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};
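
// Illustrative use of CodeBlob_sizes (a sketch mirroring CodeCache::print()
// near the end of this file): accumulate sizes over a walk of the code cache,
// then print one summary line per category.
//
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(p) {
//     if (p->is_alive()) live.add(p);
//   }
//   if (!live.is_empty()) live.print("live");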

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  print_trace("allocation", cb, size);
  return cb;
}
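
// Illustrative caller pattern for allocate() (an assumption about call sites
// elsewhere in the VM, not code in this file): per the comment above, take
// CodeCache_lock first and hold it until the blob's constructor has run.
//
//   {
//     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//     CodeBlob* cb = CodeCache::allocate(size);  // raw storage until constructed
//     // ... placement-construct the CodeBlob subclass in cb ...
//   }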

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
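
// For example, FOR_ALL_ALIVE_NMETHODS(nm) { f(nm); } expands to a loop that
// skips dead blobs and non-nmethods:
//
//   for (nmethod* nm = alive_nmethod(first()); nm != NULL;
//        nm = alive_nmethod(next(nm))) {
//     f(nm);
//   }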


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
// valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}
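
// Sketch of the unlink performed above: dropping B from the singly-linked
// list A -> B -> C walks the list with a trailing 'last' pointer, relinks
// A -> C (or moves the list head when B is first), then clears B's link and
// its on-list bit.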

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;   // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}
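
// Worked example for reverse_free_ratio(): with max_capacity() == 32M and
// 8M unallocated (25% free), the result is roughly 32M / 8M == 4; because
// CodeCacheMinimumFreeSpace is subtracted from the free space first, the
// ratio climbs steeply as the usable free space approaches zero.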

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure;
  // instead, round the code cache sizes up to the page size.  In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// Keeps track of time spent for checking dependencies
static elapsedTimer dependentCheckTime;
#endif


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}
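
// Illustrative deoptimization flow (a sketch; VM_Deoptimize is a safepoint
// operation defined elsewhere in the VM): callers typically mark affected
// nmethods first, then run a safepoint operation that processes the marked
// ones via one of the make_marked_* methods above.
//
//   int marked = CodeCache::mark_for_deoptimization(dependee);
//   if (marked > 0) {
//     VM_Deoptimize op;
//     VMThread::execute(&op);
//   }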

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive()) { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive()) { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie()) { nmethodZombie++; }
      if (nm->is_unloaded()) { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);
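  // For example, with bucketSize == 512 and maxCodeSize == 10000, bucketLimit
  // is 10000 / 512 + 1 == 20, so the buckets cover sizes 0..10239 in
  // 512-byte steps.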

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 _heap->low_boundary(),
                 _heap->high(),
                 _heap->high_boundary());
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}