/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"


// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};
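
// A minimal usage sketch (illustration only): accumulate sizes over a set of
// blobs and print one summary line, as CodeCache::print() below does for the
// live and dead partitions.
//
//   CodeBlob_sizes sizes;
//   FOR_ALL_BLOBS(cb) {   // iteration macro defined further down
//     sizes.add(cb);
//   }
//   if (!sizes.is_empty()) {
//     sizes.print("all");
//   }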

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not acquire the CodeCache lock here: if the caller has not
  // already done so, things will go badly wrong, since the code cache
  // would contain a garbage CodeBlob until the caller has run the
  // constructor for the CodeBlob subclass it is instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (CodeCache_lock->owned_by_self()) {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        report_codemem_full();
      } else {
        report_codemem_full();
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // This is called by nmethod::nmethod, which must already own the CodeCache_lock.
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
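
// For example, this (hypothetical) loop visits every live nmethod in the
// cache:
//
//   FOR_ALL_ALIVE_NMETHODS(nm) {
//     nm->print();
//   }
//
// The macros rely on alive()/alive_nmethod() above to skip dead blobs, so the
// caller must hold the CodeCache_lock or be at a safepoint, like any other
// iteration over the cache.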


bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as
// a dead CodeBlob (i.e., one that has been marked for deletion) is not looked
// up. It only depends on the _segmap containing valid indices, which it
// always does, as long as the CodeBlob is not in the process of being
// recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}
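
// A typical (hypothetical) call pattern, e.g. when mapping a PC gathered from
// a stack walk back to compiled code:
//
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;
//     // ... inspect nm ...
//   }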

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod()) {
      ((nmethod*)cb)->verify_scavenge_root_oops();
    }
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root");
      tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list()) {
        nm->set_scavenge_root_marked();
      }
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list()) {
        call_f = false;  // don't show this one to the client
      }
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;   // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      nm->verify_clean_inline_caches();
      nm->verify();
    }
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      count += nm->verify_icholder_relocations();
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    FOR_ALL_ALIVE_BLOBS(cb) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        assert(!nm->is_unloaded(), "Tautology");
        DEBUG_ONLY(if (needs_cache_clean())) {
          nm->cleanup_inline_caches();
        }
        DEBUG_ONLY(nm->verify());
        DEBUG_ONLY(nm->verify_oop_relocations());
      }
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
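// A worked example with hypothetical numbers: if max_capacity() is 128M and
// unallocated_capacity() - CodeCacheMinimumFreeSpace is 32M, the result is
// 128M / 32M = 4. Larger return values therefore mean a fuller code cache.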
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, which would cause a
  // failure on mismatch. Instead, round the code cache sizes up to the page
  // size. In particular, Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // Search the class hierarchy looking for nmethods which are affected by
  // the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)CodeBlobType::All);
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char* method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive()) { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive()) { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie()) { nmethodZombie++; }
      if (nm->is_unloaded()) { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);
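
  // For example (hypothetical numbers): with maxCodeSize == 10000 and
  // bucketSize == 512, bucketLimit is 20, and an nmethod with 1000 bytes of
  // instructions lands in bucket 1000 / 512 == 1, printed below as the
  // "512 - 1024 bytes" row.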

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %f, per dependent %f", dependentCheckTime.seconds(),
                dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 p2i(_heap->low_boundary()),
                 p2i(_heap->high()),
                 p2i(_heap->high_boundary()));
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}