/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()                                    { return total_size; }
  bool is_empty()                                { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, reloc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size             * 100 / total_size,
                  relocation_size         * 100 / total_size,
                  code_size               * 100 / total_size,
                  stub_size               * 100 / total_size,
                  scopes_oop_size         * 100 / total_size,
                  scopes_metadata_size    * 100 / total_size,
                  scopes_data_size        * 100 / total_size,
                  scopes_pcs_size         * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
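
// Example (illustrative, not part of the original file): tallying live blobs
// with the iteration macros and the CodeBlob_sizes helper above. The caller
// must hold CodeCache_lock or be at a safepoint (see first_blob()/next_blob()).
//
//   CodeBlob_sizes sizes;
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       if (cb->is_alive()) sizes.add(cb);
//     }
//   }
//   if (!sizes.is_empty()) sizes.print("live");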

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
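
// Example (illustrative): -XX:ReservedCodeCacheSize=100M with explicit
// -XX:NonNMethodCodeHeapSize=10M -XX:ProfiledCodeHeapSize=60M
// -XX:NonProfiledCodeHeapSize=60M sums to 130M > 100M, so initialization
// fails with the "greater than ReservedCodeCacheSize" message above.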

void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
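      // Example (illustrative): cache_size = 240M and non_nmethod_size = 8M
      // (default plus compiler buffers) leave remaining_size = 232M, so
      // profiled_size = non_profiled_size = 116M.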
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }
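
  // Example (illustrative): if only -XX:ProfiledCodeHeapSize is set, diff_size
  // is first folded into the non-profiled code heap above; only a remainder
  // that cannot be absorbed there is applied to the non-nmethod code heap.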

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < min_code_cache_size) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
        non_nmethod_size/K, min_code_cache_size/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);

  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
  non_nmethod_size = align_up(non_nmethod_size, alignment);
  profiled_size    = align_down(profiled_size, alignment);

  // Reserve one contiguous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  if (os::can_execute_large_page_memory()) {
    if (InitialCodeCacheSize < ReservedCodeCacheSize) {
      // Make sure that the page size allows for an incremental commit of the reserved space
      min_pages = MAX2(min_pages, (size_t)8);
    }
    return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
                     os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
  } else {
    return os::vm_page_size();
  }
}
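
// Example (illustrative, assuming the usual page_size_for_region semantics):
// with 2M large pages and ReservedCodeCacheSize=240M, an incremental commit
// needs at least 8 pages; 240M / 2M = 120 pages, so 2M pages can be used.
// Below a 16M reservation, a smaller page size would be chosen instead.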

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Align and reserve space for code cache
  const size_t rs_ps = page_size();
  const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
  const size_t rs_size = align_up(size, rs_align);
  ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
  if (!rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
                                          rs_size/K));
  }

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();
  return rs;
}

// Heaps available for allocation
bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
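
// Summary (illustrative) of the cases above:
//   non-segmented cache:       CodeBlobType::All only
//   interpreter-only (-Xint):  NonNMethod only
//   tiered compilation:        NonNMethod, MethodProfiled, MethodNonProfiled
//   non-tiered:                NonNMethod and MethodNonProfiled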

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch (code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_allocable(type)) {
    _allocable_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
  size_initial = align_up(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
                                          heap->name(), size_initial/K));
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap_containing(void* start) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(start)) {
      return *heap;
    }
  }
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains_blob(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Save original type for error reporting
      if (orig_code_blob_type == CodeBlobType::All) {
        orig_code_blob_type = code_blob_type;
      }
      // Expansion failed
      if (SegmentedCodeCache) {
        // Fallback solution: Try to store code in another code heap.
        // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
        int type = code_blob_type;
        switch (type) {
        case CodeBlobType::NonNMethod:
          type = CodeBlobType::MethodNonProfiled;
          break;
        case CodeBlobType::MethodNonProfiled:
          type = CodeBlobType::MethodProfiled;
          break;
        case CodeBlobType::MethodProfiled:
          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}
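
// Fallback order (illustrative): a NonNMethod request that cannot be satisfied
// even after expansion retries in MethodNonProfiled, then MethodProfiled,
// before the original heap is reported full via
// CompileBroker::handle_full_code_cache().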

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() - 1);
  }

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(heap->blob_count() >= 0, "sanity check");
}

void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
  assert_locked_or_safepoint(CodeCache_lock);
  guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
  print_trace("free_unused_tail", cb);

  // We also have to account for the extra space (i.e. header) used by the CodeBlob
  // which provides the memory (see BufferBlob::create() in codeBlob.cpp).
  used += CodeBlob::align_code_offset(cb->header_size());

  // Get heap for given CodeBlob and deallocate its unused tail
  get_code_heap(cb)->deallocate_tail(cb, used);
  // Adjust the sizes of the CodeBlob
  cb->adjust_size(used);
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() + 1);
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
// valid indices, which is always the case as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
// what you are doing).
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before the code cache is created
  if (_heaps != NULL) {
    CodeHeap* heap = get_code_heap_containing(start);
    if (heap != NULL) {
      return heap->find_blob_unsafe(start);
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::all_blobs);
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(MetadataClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    iter.method()->metadata_do(f);
  }
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  UnloadingScope scope(is_alive);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive);
  while (iter.next()) {
    iter.method()->do_unloading(unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod()) {
          Universe::heap()->verify_nmethod((nmethod*)cb);
        }
#endif //ASSERT
      }
    }
  }
}

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
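// Entries are pushed onto a lock-free singly-linked purge list with a CAS
// retry loop; purge_exception_caches() later walks the list and deletes them.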
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(purge_list_head);
      if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
        break;
      }
    }
  }
}

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}

uint8_t CodeCache::_unloading_cycle = 1;

void CodeCache::increment_unloading_cycle() {
  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
  // 0 is reserved for new methods.
  _unloading_cycle = (_unloading_cycle + 1) % 4;
  if (_unloading_cycle == 0) {
    _unloading_cycle = 1;
  }
}
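
// Example: starting at 1, the cycle advances 1 -> 2 -> 3 -> 1 -> ..., so the
// reserved value 0 is never reused once the VM is running.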

CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  _saved_behaviour = IsUnloadingBehaviour::current();
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}

CodeCache::UnloadingScope::~UnloadingScope() {
  IsUnloadingBehaviour::set_current(_saved_behaviour);
  DependencyContext::cleaning_end();
}

void CodeCache::verify_oops() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure on
  // misalignment; instead, round the expansion size up to the page size.
  // In particular, Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give the OS a chance to register the generated code area.
  // This is used on 64-bit Windows to register Structured Exception
  // Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    iter.method()->cleanup_inline_caches(/*clean_all=*/true);
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  NoSafepointVerifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
  return (CompiledMethod*)cb;
}

bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large.  If it does, it can become a hashtable.
static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL;

static void add_to_old_table(CompiledMethod* c) {
  if (old_compiled_method_table == NULL) {
    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, true);
  }
  old_compiled_method_table->push(c);
}

static void reset_old_method_table() {
  if (old_compiled_method_table != NULL) {
    delete old_compiled_method_table;
    old_compiled_method_table = NULL;
  }
}

// Remove this method when zombied or unloaded.
void CodeCache::unregister_old_nmethod(CompiledMethod* c) {
  assert_lock_strong(CodeCache_lock);
  if (old_compiled_method_table != NULL) {
    int index = old_compiled_method_table->find(c);
    if (index != -1) {
      old_compiled_method_table->delete_at(index);
    }
  }
}

void CodeCache::old_nmethods_do(MetadataClosure* f) {
  // Walk old method table and mark those on stack.
  int length = 0;
  if (old_compiled_method_table != NULL) {
    length = old_compiled_method_table->length();
    for (int i = 0; i < length; i++) {
      CompiledMethod* cm = old_compiled_method_table->at(i);
      // Only walk alive nmethods, the dead ones will get removed by the sweeper.
      if (cm->is_alive()) {
        old_compiled_method_table->at(i)->metadata_do(f);
      }
    }
  }
  log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length);
}

// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // Mark dependent AOT nmethods, which are only found via the class redefined.
  // TODO: add dependencies to aotCompiledMethod's metadata section so this isn't
  // needed.
  AOTLoader::mark_evol_dependent_methods(dependee);
}

// Walk compiled methods and mark dependent methods for deoptimization.
int CodeCache::mark_dependents_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  // Each redefinition creates a new set of nmethods that have references to "old" Methods,
  // so delete the old method table and create a new one.
  reset_old_method_table();

  int number_of_marked_CodeBlobs = 0;
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* nm = iter.method();
    // Walk all alive nmethods to check for old Methods.
    // This includes methods whose inline caches point to old methods, so
    // inline cache clearing is unnecessary.
    if (nm->has_evol_metadata()) {
      nm->mark_for_deoptimization();
      add_to_old_table(nm);
      number_of_marked_CodeBlobs++;
    }
  }

  // Return the total count of nmethods marked for deoptimization; if zero,
  // the caller can skip deoptimization.
  return number_of_marked_CodeBlobs;
}
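
// Typical flow (illustrative): when the count returned above is non-zero, the
// redefinition code later calls flush_evol_dependents() to deoptimize affected
// frames and make the marked methods not entrant.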

void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
      if (nm->has_evol_metadata()) {
        add_to_old_table(nm);
      }
    }
  }
}

// Flushes compiled methods dependent on redefined classes that have already
// been marked for deoptimization.
void CodeCache::flush_evol_dependents() {
  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // At least one nmethod has been marked for deoptimization.

  // All this already happens inside a VM_Operation, so we'll do all the work here.
  // Stuff copied from VM_Deoptimize and modified slightly.

  // We do not want any GCs to happen while we are in the middle of this VM operation
  ResourceMark rm;
  DeoptimizationMarker dm;

  // Deoptimize all activations depending on marked nmethods
  Deoptimization::deoptimize_dependents();

  // Make the dependent methods not entrant
  make_marked_nmethods_not_entrant();
}
#endif // INCLUDE_JVMTI

// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
  while (iter.next()) {
    CompiledMethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
PRAGMA_DIAG_PUSH
PRAGMA_FORMAT_NONLITERAL_IGNORED
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if ((heap->full_count() == 0) || print) {
    // Not yet reported for this heap, report
    if (SegmentedCodeCache) {
      ResourceMark rm;
      stringStream msg1_stream, msg2_stream;
      msg1_stream.print("%s is full. Compiler has been disabled.",
                        get_code_heap_name(code_blob_type));
      msg2_stream.print("Try increasing the code heap size using -XX:%s=",
                        get_code_heap_flag_name(code_blob_type));
      const char *msg1 = msg1_stream.as_string();
      const char *msg2 = msg2_stream.as_string();

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    } else {
      const char *msg1 = "CodeCache is full. Compiler has been disabled.";
      const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=";

      log_warning(codecache)("%s", msg1);
      log_warning(codecache)("%s", msg2);
      warning("%s", msg1);
      warning("%s", msg2);
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    {
      ttyLocker ttyl;
      tty->print("%s", s.as_string());
    }

    if (heap->full_count() == 0) {
      if (PrintCodeHeapAnalytics) {
        CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
      }
    }
  }

  heap->report_full();

  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}
PRAGMA_DIAG_POP

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT,       freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB",  bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB",  (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB",  allocated_segments()/K); // 1 byte per segment
}
1347 
1348 //------------------------------------------------------------------------------------------------
1349 // Non-product version
1350 
1351 #ifndef PRODUCT
1352 
1353 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
1354   if (PrintCodeCache2) {  // Need to add a new flag
1355     ResourceMark rm;
1356     if (size == 0)  size = cb->size();
1357     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1358   }
1359 }
1360 
1361 void CodeCache::print_internals() {
1362   int nmethodCount = 0;
1363   int runtimeStubCount = 0;
1364   int adapterCount = 0;
1365   int deoptimizationStubCount = 0;
1366   int uncommonTrapStubCount = 0;
1367   int bufferBlobCount = 0;
1368   int total = 0;
1369   int nmethodAlive = 0;
1370   int nmethodNotEntrant = 0;
1371   int nmethodZombie = 0;
1372   int nmethodUnloaded = 0;
1373   int nmethodJava = 0;
1374   int nmethodNative = 0;
1375   int max_nm_size = 0;
1376   ResourceMark rm;
1377 
1378   int i = 0;
1379   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1380     if ((_nmethod_heaps->length() >= 1) && Verbose) {
1381       tty->print_cr("-- %s --", (*heap)->name());
1382     }
1383     FOR_ALL_BLOBS(cb, *heap) {
1384       total++;
1385       if (cb->is_nmethod()) {
1386         nmethod* nm = (nmethod*)cb;
1387 
1388         if (Verbose && nm->method() != NULL) {
1389           ResourceMark rm;
1390           char *method_name = nm->method()->name_and_sig_as_C_string();
1391           tty->print("%s", method_name);
1392           if(nm->is_alive()) { tty->print_cr(" alive"); }
1393           if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1394           if(nm->is_zombie()) { tty->print_cr(" zombie"); }
1395         }
1396 
1397         nmethodCount++;
1398 
1399         if(nm->is_alive()) { nmethodAlive++; }
1400         if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1401         if(nm->is_zombie()) { nmethodZombie++; }
1402         if(nm->is_unloaded()) { nmethodUnloaded++; }
1403         if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
1404 
1405         if(nm->method() != NULL && nm->is_java_method()) {
1406           nmethodJava++;
1407           max_nm_size = MAX2(max_nm_size, nm->size());
1408         }
1409       } else if (cb->is_runtime_stub()) {
1410         runtimeStubCount++;
1411       } else if (cb->is_deoptimization_stub()) {
1412         deoptimizationStubCount++;
1413       } else if (cb->is_uncommon_trap_stub()) {
1414         uncommonTrapStubCount++;
1415       } else if (cb->is_adapter_blob()) {
1416         adapterCount++;
1417       } else if (cb->is_buffer_blob()) {
1418         bufferBlobCount++;
1419       }
1420     }
1421   }
1422 
1423   int bucketSize = 512;
1424   int bucketLimit = max_nm_size / bucketSize + 1;
1425   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1426   memset(buckets, 0, sizeof(int) * bucketLimit);
1427 
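  // Tally each Java nmethod into its 512-byte size bucket. bucketLimit above
  // is derived from max_nm_size, so the largest nmethod always fits.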
1428   NMethodIterator iter(NMethodIterator::all_blobs);
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
1432       buckets[nm->size() / bucketSize]++;
1433     }
1434   }
1435 
1436   tty->print_cr("Code Cache Entries (total of %d)",total);
1437   tty->print_cr("-------------------------------------------------");
1438   tty->print_cr("nmethods: %d",nmethodCount);
1439   tty->print_cr("\talive: %d",nmethodAlive);
1440   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1441   tty->print_cr("\tzombie: %d",nmethodZombie);
1442   tty->print_cr("\tunloaded: %d",nmethodUnloaded);
1443   tty->print_cr("\tjava: %d",nmethodJava);
1444   tty->print_cr("\tnative: %d",nmethodNative);
1445   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1446   tty->print_cr("adapters: %d",adapterCount);
1447   tty->print_cr("buffer blobs: %d",bufferBlobCount);
1448   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1449   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1450   tty->print_cr("\nnmethod size distribution (non-zombie java)");
1451   tty->print_cr("-------------------------------------------------");
1452 
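  // Each histogram row covers the half-open range [i*bucketSize, (i+1)*bucketSize).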
  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
1458     }
1459   }
1460 
1461   FREE_C_HEAP_ARRAY(int, buckets);
1462   print_memory_overhead();
1463 }
1464 
1465 #endif // !PRODUCT
1466 
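// Print the cache summary; in non-product builds with Verbose set, also
// print live/dead blob statistics and (with WizardMode) oop map usage.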
1467 void CodeCache::print() {
1468   print_summary(tty);
1469 
1470 #ifndef PRODUCT
1471   if (!Verbose) return;
1472 
1473   CodeBlob_sizes live;
1474   CodeBlob_sizes dead;
1475 
1476   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1477     FOR_ALL_BLOBS(cb, *heap) {
1478       if (!cb->is_alive()) {
1479         dead.add(cb);
1480       } else {
1481         live.add(cb);
1482       }
1483     }
1484   }
1485 
1486   tty->print_cr("CodeCache:");
1487   tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1488 
1489   if (!live.is_empty()) {
1490     live.print("live");
1491   }
1492   if (!dead.is_empty()) {
1493     dead.print("dead");
1494   }
1495 
  if (WizardMode) {
    // Print the oop_map usage.
1498     int code_size = 0;
1499     int number_of_blobs = 0;
1500     int number_of_oop_maps = 0;
1501     int map_size = 0;
1502     FOR_ALL_ALLOCABLE_HEAPS(heap) {
1503       FOR_ALL_BLOBS(cb, *heap) {
1504         if (cb->is_alive()) {
1505           number_of_blobs++;
1506           code_size += cb->code_size();
1507           ImmutableOopMapSet* set = cb->oop_maps();
1508           if (set != NULL) {
1509             number_of_oop_maps += set->count();
1510             map_size           += set->nr_of_bytes();
1511           }
1512         }
1513       }
1514     }
1515     tty->print_cr("OopMaps");
1516     tty->print_cr("  #blobs    = %d", number_of_blobs);
1517     tty->print_cr("  code size = %d", code_size);
1518     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1519     tty->print_cr("  map size  = %d", map_size);
1520   }
1521 
1522 #endif // !PRODUCT
1523 }
1524 
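// Print one summary line per code heap. Example (illustrative only):
//   CodeHeap 'non-profiled nmethods': size=120032Kb used=4838Kb max_used=5317Kb free=115194Kb
// With detailed == true, also print each heap's bounds, the global blob
// counts, and the current compiler/compilation state.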
1525 void CodeCache::print_summary(outputStream* st, bool detailed) {
1526   int full_count = 0;
1527   FOR_ALL_HEAPS(heap_iterator) {
1528     CodeHeap* heap = (*heap_iterator);
1529     size_t total = (heap->high_boundary() - heap->low_boundary());
1530     if (_heaps->length() >= 1) {
1531       st->print("%s:", heap->name());
1532     } else {
1533       st->print("CodeCache:");
1534     }
1535     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1536                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1537                  total/K, (total - heap->unallocated_capacity())/K,
1538                  heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1539 
1540     if (detailed) {
1541       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1542                    p2i(heap->low_boundary()),
1543                    p2i(heap->high()),
1544                    p2i(heap->high_boundary()));
1545 
1546       full_count += get_codemem_full_count(heap->code_blob_type());
1547     }
1548   }
1549 
1550   if (detailed) {
1551     st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1552                        " adapters=" UINT32_FORMAT,
1553                        blob_count(), nmethod_count(), adapter_count());
1554     st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1555                  "enabled" : Arguments::mode() == Arguments::_int ?
1556                  "disabled (interpreter mode)" :
1557                  "disabled (not enough contiguous free space left)");
1558     st->print_cr("              stopped_count=%d, restarted_count=%d",
1559                  CompileBroker::get_total_compiler_stopped_count(),
1560                  CompileBroker::get_total_compiler_restarted_count());
1561     st->print_cr(" full_count=%d", full_count);
1562   }
1563 }
1564 
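// Print one line per alive, not-unloading compiled method, in the form:
//   <compile_id> <comp_level> <state> <name+signature> [header_begin, code_begin - code_end]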
1565 void CodeCache::print_codelist(outputStream* st) {
1566   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1567 
1568   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
1569   while (iter.next()) {
1570     CompiledMethod* cm = iter.method();
1571     ResourceMark rm;
1572     char* method_name = cm->method()->name_and_sig_as_C_string();
1573     st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
1574                  cm->compile_id(), cm->comp_level(), cm->get_state(),
1575                  method_name,
1576                  (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end());
1577   }
1578 }
1579 
1580 void CodeCache::print_layout(outputStream* st) {
1581   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1582   ResourceMark rm;
1583   print_summary(st, true);
1584 }
1585 
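// Emit the current cache counters as XML attributes; used by the
// compilation log (-XX:+LogCompilation).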
1586 void CodeCache::log_state(outputStream* st) {
1587   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1588             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1589             blob_count(), nmethod_count(), adapter_count(),
1590             unallocated_capacity());
1591 }
1592 
1593 //---<  BEGIN  >--- CodeHeap State Analytics.
1594 
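// The functions below delegate to CodeHeapState for each allocable heap.
// aggregate() builds a statistics snapshot at the given granularity,
// discard() releases it, and the print_* functions report on the most
// recently aggregated snapshot (see codeHeapState.cpp for details).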
1595 void CodeCache::aggregate(outputStream *out, size_t granularity) {
1596   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1597     CodeHeapState::aggregate(out, (*heap), granularity);
1598   }
1599 }
1600 
1601 void CodeCache::discard(outputStream *out) {
1602   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1603     CodeHeapState::discard(out, (*heap));
1604   }
1605 }
1606 
1607 void CodeCache::print_usedSpace(outputStream *out) {
1608   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1609     CodeHeapState::print_usedSpace(out, (*heap));
1610   }
1611 }
1612 
1613 void CodeCache::print_freeSpace(outputStream *out) {
1614   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1615     CodeHeapState::print_freeSpace(out, (*heap));
1616   }
1617 }
1618 
1619 void CodeCache::print_count(outputStream *out) {
1620   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1621     CodeHeapState::print_count(out, (*heap));
1622   }
1623 }
1624 
1625 void CodeCache::print_space(outputStream *out) {
1626   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1627     CodeHeapState::print_space(out, (*heap));
1628   }
1629 }
1630 
1631 void CodeCache::print_age(outputStream *out) {
1632   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1633     CodeHeapState::print_age(out, (*heap));
1634   }
1635 }
1636 
1637 void CodeCache::print_names(outputStream *out) {
1638   FOR_ALL_ALLOCABLE_HEAPS(heap) {
1639     CodeHeapState::print_names(out, (*heap));
1640   }
1641 }
1642 //---<  END  >--- CodeHeap State Analytics.