/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
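// Note that FOR_ALL_HEAPS binds 'heap' to a GrowableArrayIterator rather than
// a CodeHeap*, which is why the loop bodies below dereference it as (*heap).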

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*>(CodeBlobType::All, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
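  // (Debug builds generate considerably larger code, hence the DEBUG_ONLY
  // factor of 3 below.)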
  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align reserved sizes of CodeHeaps
  size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
  size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
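  // Illustrative example (numbers are not normative): with ReservedCodeCacheSize
  // at 240M and roughly 6M ending up in the non-nmethod heap, the remaining
  // ~234M is split evenly, giving each method heap about 117M.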
  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
  ReservedSpace non_method_space    = rs.first_part(non_method_size);
  ReservedSpace rest                = rs.last_part(non_method_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", init_non_method_size, CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
               os::page_size_for_region(size, 8)) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

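  // A non-zero rs_align means a large page size was selected above: the
  // reservation is then aligned to it and large pages are requested via the
  // third constructor argument. rs_align == 0 uses the default alignment.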
  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if ((Arguments::mode() == Arguments::_int) ||
             (TieredStopAtLevel == CompLevel_none)) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_initial = round_to(size_initial, os::vm_page_size());

  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}

CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here -- if the caller has not
  // already done so, we are going to lose big time, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

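  // Try to allocate from the heap, expanding it by CodeCacheExpansionSize on
  // failure, until the allocation succeeds or the reserved space is exhausted.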
  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
        // Fallback solution: Store non-nmethod code in the non-profiled code heap
        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It relies only on the _segmap containing
// valid indices, which holds as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb != NULL && cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    f(iter.method());
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod()) {
          ((nmethod*)cb)->verify_scavenge_root_oops();
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

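  // Note: under G1 the scavenge-root list is not maintained (G1 tracks nmethod
  // roots with per-region code root sets), hence the early return above.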
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list()) {
      nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list()) {
      call_f = false;  // don't show this one to the client
    }
    nm->verify_scavenge_root_oops();
    if (call_f)  f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
 */
bool CodeCache::is_full(int* code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
      *code_blob_type = (*heap)->code_blob_type();
      return true;
    }
  }
  return false;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
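 * The free capacity is reduced by CodeCacheMinimumFreeSpace before dividing,
 * so the ratio grows rapidly as a heap approaches its minimum free space;
 * the tiered compilation policy uses this to scale compilation thresholds.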
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)heap->max_capacity();
  return max_capacity / unallocated_capacity;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused initialization to
  // fail. Instead, round the expansion size up to the page size; in particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", InitialCodeCacheSize, CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the class hierarchy looking for nmethods which are affected by the
  // loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen.
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
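    // Method handle intrinsics are skipped below, presumably because they are
    // implemented directly by the VM and have no regular bytecode to fall back to.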
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=",
          (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump the code cache into a buffer before locking the tty, so that the
    // CodeCache_lock is released again before the ttyLocker is acquired.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
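  // Each CodeBlob is preceded by a HeapBlock header and its allocation is
  // rounded up to whole segments; the difference between the segment-rounded
  // length and the blob's actual size is counted as wasted space.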
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  FOR_ALL_HEAPS(heap) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive()) { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive()) { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie()) { nmethodZombie++; }
        if (nm->is_unloaded()) { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

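  // Build a histogram of Java nmethod sizes, one bucket per bucketSize bytes.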
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size           += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (SegmentedCodeCache) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  assert_locked_or_safepoint(CodeCache_lock);

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char *method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  assert_locked_or_safepoint(CodeCache_lock);
  ResourceMark rm;

  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}