/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%,  loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};
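
// Usage sketch (illustrative, mirroring CodeCache::print() further below):
// one accumulator per category is filled while walking the code cache and
// printed only if it saw at least one blob:
//
//   CodeBlob_sizes live;
//   live.add(cb);                // for each alive CodeBlob* cb
//   if (!live.is_empty()) {
//     live.print("live");        // "#<count> live = <n>K (hdr ..%, ...)"
//   }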

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
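
// Example (sketch): a full walk over every blob in every heap. The caller
// must hold the CodeCache_lock or be at a safepoint, since
// first_blob()/next_blob() assert exactly that:
//
//   FOR_ALL_HEAPS(heap) {          // heap is a GrowableArrayIterator<CodeHeap*>
//     FOR_ALL_BLOBS(cb, *heap) {   // cb is a CodeBlob*
//       cb->print();               // or any other per-blob operation
//     }
//   }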

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (%zuK) + ProfiledCodeHeapSize (%zuK) + NonProfiledCodeHeapSize (%zuK) = %zuK",
          non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (%zuK).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (%zuK).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
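
// Worked example (hypothetical flag values): with
//   -XX:ReservedCodeCacheSize=240m -XX:NonNMethodCodeHeapSize=8m
//   -XX:ProfiledCodeHeapSize=120m -XX:NonProfiledCodeHeapSize=120m
// all three heap sizes are user-set, so all_set is true and
// total_size = 8m + 120m + 120m = 248m != 240m: the VM exits with the
// "is not equal to ReservedCodeCacheSize (245760K)" message. If only some
// flags are given, the caller substitutes a page size for the unset heaps
// and only the "is greater than" check can fire.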

void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and split the remaining
      // space evenly between the profiled and non-profiled code heaps
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt the non-profiled and profiled
    // code heap sizes, and then only change the non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size    = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
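
// Worked example (illustrative; exact defaults are platform-dependent):
// with no code heap flags set and cache_size large enough, the sizing
// above reduces to
//   non_nmethod_size  = NonNMethodCodeHeapSize + code_buffers_size
//   profiled_size     = (cache_size - non_nmethod_size) / 2
//   non_profiled_size = cache_size - non_nmethod_size - profiled_size
// so the two method heaps differ by at most one byte before the final
// alignment step shifts the boundary between them.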

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
             os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
             os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
               os::page_size_for_region_aligned(size, 8)) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
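
// Summary of the cases above (illustrative):
//   -XX:-SegmentedCodeCache                  -> CodeBlobType::All only
//   interpreter-only (e.g. -Xint)            -> NonNMethod only
//   TieredCompilation above CompLevel_simple -> NonNMethod, MethodProfiled
//                                               and MethodNonProfiled
//   otherwise (e.g. -XX:TieredStopAtLevel=1) -> NonNMethod and
//                                               MethodNonProfiled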

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch (code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb->code_begin())) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}
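
// Example (sketch): enumerating the blobs of a single heap with the two
// accessors above; this is exactly what the FOR_ALL_BLOBS macro expands to:
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb)) {
//     ...   // cb is never NULL inside the loop
//   }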

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}
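
// Fallback order (illustrative trace): an allocation that starts in the
// non-nmethod heap may retry in MethodNonProfiled and then MethodProfiled;
// one that starts in MethodProfiled retries once in MethodNonProfiled.
// orig_code_blob_type pins the starting heap so the walk never revisits it,
// which is what terminates the recursion before
// CompileBroker::handle_full_code_cache() is notified.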

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // This is called by nmethod::nmethod(), which must already own the CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // Flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
// valid indices, which it always does as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL && !_heaps->is_empty()) {
    FOR_ALL_HEAPS(heap) {
      CodeBlob* result = (*heap)->find_blob_unsafe(start);
      if (result != NULL) {
        return result;
      }
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}
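
// Example (sketch): mapping a code address, e.g. a frame's pc, back to its
// nmethod and from there to the metadata. find_blob() filters zombies via
// the guarantee above, so callers that may legitimately see dying code must
// use find_blob_unsafe() instead:
//
//   address pc = ...;   // some address inside compiled code
//   nmethod* nm = CodeCache::find_nmethod(pc);
//   Method*  m  = nm->method();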

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          ((nmethod*)cb)->verify_scavenge_root_oops();
        }
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethods on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* prev = NULL;
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    if (cur == nm) {
      unlink_scavenge_root_nmethod(cur, prev);
      return;
    }
    prev = cur;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // Make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while (iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());
    }
  }

  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}
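
// Numeric example (illustrative): a 128M code heap with 12.8M unallocated
// yields 128 / 12.8 = 10, so "reverse_free_ratio() >= 10" is the same
// condition as "less than 10% of the code heap is free" mentioned in
// CodeCache::allocate() above. The MAX2 guard only matters for a completely
// full heap, where it caps the result at max_capacity instead of dividing
// by zero.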

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure; instead,
  // round the code cache expansion size up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen.
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // Flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while (iter.next_alive()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}
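
// Flow (illustrative): a class load that invalidates dependencies thus
// runs in two steps under the Compile_lock:
//
//   KlassDepChange changes(dependee);
//   if (mark_for_deoptimization(changes) > 0) {  // 1. mark dependents
//     VM_Deoptimize op;                          // 2. safepoint op that
//     VMThread::execute(&op);                    //    deoptimizes activations
//   }                                            //    and makes marked
//                                                //    nmethods not entrant
//
// The HOTSWAP and per-method variants below inline the VM_Deoptimize work
// because they are already executed inside a VM operation.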

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump the code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
}
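
// Arithmetic (illustrative): blobs are allocated in whole segments, so with
// a CodeCacheSegmentSize of 128 bytes a blob whose size() is 1000 bytes
// occupies at least ceil(1000 / 128) = 8 segments = 1024 bytes; the
// difference is what accumulates in wasted_bytes above, and the segment map
// costs one byte per segment on top of that.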

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  FOR_ALL_NMETHOD_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive()) { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive()) { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie()) { nmethodZombie++; }
        if (nm->is_unloaded()) { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_NMETHOD_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                       " adapters=" UINT32_FORMAT,
                       blob_count(), nmethod_count(), adapter_count());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}
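
// Illustrative output of print_summary() for a segmented code cache (the
// numbers are made up):
//   CodeHeap 'non-nmethods': size=5700Kb used=2609Kb max_used=2650Kb free=3090Kb
//   CodeHeap 'profiled nmethods': size=120028Kb used=9172Kb max_used=9172Kb free=110855Kb
//   CodeHeap 'non-profiled nmethods': size=120032Kb used=4019Kb max_used=4019Kb free=116012Kb
// plus, when detailed, the per-heap bounds and the total_blobs/compilation
// lines below.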

void CodeCache::print_codelist(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char *method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}