/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};


// CodeCache implementation

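// Note: the CodeHeap object is created eagerly here, but its underlying
// memory is not reserved until CodeCache::initialize() runs (see below).
// _scavenge_root_nmethods heads a singly-linked list of nmethods that may
// contain scavengable (non-perm) oops; _saved_nmethods heads the list of
// speculatively disconnected nmethods.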
CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;


CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here; if the caller has not
  // already done so, we are going to lose badly, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
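  // Allocate from the code heap, expanding it by CodeCacheExpansionSize as
  // needed.  If the heap cannot be expanded any further (the reserved space
  // is exhausted), return NULL so the caller can bail out.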
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (int)((address)_heap->end() - (address)_heap->begin()));
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
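// Example usage (a sketch; mirrors clear_inline_caches() below): visit every
// live nmethod while holding the CodeCache_lock or at a safepoint:
//
//   FOR_ALL_ALIVE_NMETHODS(nm) {
//     nm->clear_inline_caches();
//   }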


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as
// a dead CodeBlob (i.e., one that has been marked for deletion) is not looked
// up. It only depends on the _segmap containing valid indices, which it
// always will, as long as the CodeBlob is not in the process of being
// recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up not-entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
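  // Walk the singly-linked list and unlink nm when we find it.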
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;   // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT


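// Search the list of speculatively disconnected nmethods for one compiled
// for method m that is still usable; if found, unlink it from the list and
// hand it back so it can be reconnected instead of recompiled.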
nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected\n");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(methodOop(m));
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  // For the concurrent sweeper, this is called with the CodeCache_lock
  // already held by the caller.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

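// Speculative disconnection: detach the nmethod from its methodOop (so the
// method reverts to the interpreter or gets recompiled) but park it on the
// _saved_nmethods list, so that find_and_remove_saved_code() can cheaply
// reconnect it if the method becomes hot again before it is flushed.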
void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(methodOop(nm->method()));
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


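// Called at the end of a GC: clean inline caches if a cleaning pass was
// requested, re-verify nmethods, patch any embedded oops the GC may have
// moved, and prune dead entries from the scavenge-root list.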
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // Originally this was just an alignment check that caused a failure;
  // instead, round the code cache sizes up to the page size.  In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


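// Mark for deoptimization all nmethods that depend on classes affected by
// the given dependency change; returns the number of nmethods marked.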
int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // Search the class hierarchy looking for nmethods which are affected
  // by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
 649 
 650   { No_Safepoint_Verifier nsv;
 651     for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
 652       klassOop d = str.klass();
 653       number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
 654     }
 655   }
 656 
 657   if (VerifyDependencies) {
 658     // Turn off dependency tracing while actually testing deps.
 659     NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
 660     FOR_ALL_ALIVE_NMETHODS(nm) {
 661       if (!nm->is_marked_for_deoptimization() &&
 662           nm->check_all_dependencies()) {
 663         ResourceMark rm;
 664         tty->print_cr("Should have been marked for deoptimization:");
 665         changes.print();
 666         nm->print();
 667         nm->print_dependencies();
 668       }
 669     }
 670   }
 671 
 672 #ifndef PRODUCT
 673   dependentCheckTime.stop();
 674 #endif // PRODUCT
 675 
 676   return number_of_marked_CodeBlobs;
 677 }
 678 
 679 
 680 #ifdef HOTSWAP
 681 int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
 682   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 683   int number_of_marked_CodeBlobs = 0;
 684 
 685   // Deoptimize all methods of the evolving class itself
 686   objArrayOop old_methods = dependee->methods();
 687   for (int i = 0; i < old_methods->length(); i++) {
 688     ResourceMark rm;
 689     methodOop old_method = (methodOop) old_methods->obj_at(i);
 690     nmethod *nm = old_method->code();
 691     if (nm != NULL) {
 692       nm->mark_for_deoptimization();
 693       number_of_marked_CodeBlobs++;
 694     }
 695   }
 696 
 697   FOR_ALL_ALIVE_NMETHODS(nm) {
 698     if (nm->is_marked_for_deoptimization()) {
      // Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined methodOop
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}

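// Mark for deoptimization every live nmethod that is dependent on the
// given method (for example, because it inlined it).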
int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char* method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive())       { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie())      { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive())         { nmethodAlive++; }
      if (nm->is_not_entrant())   { nmethodNotEntrant++; }
      if (nm->is_zombie())        { nmethodZombie++; }
      if (nm->is_unloaded())      { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

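  // Build a histogram of nmethod instruction sizes in 512-byte buckets and
  // print it below.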
  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
}

void CodeCache::print() {
  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %fs (%fs per check)",
                dependentCheckTime.seconds(),
                dependentCheckCount == 0 ? 0.0 : dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (Verbose) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

}

#endif // PRODUCT

void CodeCache::print_bounds(outputStream* st) {
  st->print_cr("Code Cache  [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               _heap->low_boundary(),
               _heap->high(),
               _heap->high_boundary());
  st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
               " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT "Kb"
               " largest_free_block=" SIZE_FORMAT,
               nof_blobs(), nof_nmethods(), nof_adapters(),
               unallocated_capacity()/K, largest_free_block());
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
            " largest_free_block='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity(), largest_free_block());
}

size_t CodeCache::largest_free_block() {
  // This is called both with and without the CodeCache_lock held, so
  // handle both cases.
  if (CodeCache_lock->owned_by_self()) {
    return _heap->largest_free_block();
  } else {
    // Avoid lock ordering problems with ttyLock.
    ttyUnlocker ttyul;
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    return _heap->largest_free_block();
  }
}