src/share/vm/code/codeCache.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/code

src/share/vm/code/codeCache.cpp

Print this page




 141 
 142 // Initialize array of CodeHeaps
     // Eagerly allocated C-heap-resident array holding one CodeHeap* per
     // available heap; entries are appended by add_heap() during startup.
 143 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
 144 
 145 void CodeCache::initialize_heaps() {
     // Sizes the three CodeHeaps (non-method, profiled, non-profiled) from the
     // *CodeHeapSize flags, reserves one contiguous code space and registers the
     // individual heaps via add_heap(). Only called for a segmented code cache
     // (see CodeCache::initialize()). Exits the VM if the non-method heap would
     // be too small to hold VM internal code.
 146   // Determine size of compiler buffers
 147   size_t code_buffers_size = 0;
 148 #ifdef COMPILER1
 149   // C1 temporary code buffers (see Compiler::init_buffer_blob())
 150   const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
 151   code_buffers_size += c1_count * Compiler::code_buffer_size();
 152 #endif
 153 #ifdef COMPILER2
 154   // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
 155   const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
 156   // Initial size of constant table (this may be increased if a compiled method needs more space)
 157   code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
 158 #endif
 159 
 160   // Calculate default CodeHeap sizes if not set by user
     // Defaults are only computed when none of the three heap sizes was given
     // on the command line; a single user-set flag disables all ergonomics here.
 161   if (!FLAG_IS_CMDLINE(NonMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
 162       && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
 163     // Increase default NonMethodCodeHeapSize to account for compiler buffers
 164     FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + code_buffers_size);
 165 
 166     // Check if we have enough space for the non-method code heap
 167     if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
 168       // Use the default value for NonMethodCodeHeapSize and one half of the
 169       // remaining size for non-profiled methods and one half for profiled methods
 170       size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
 171       size_t profiled_size = remaining_size / 2;
 172       size_t non_profiled_size = remaining_size - profiled_size;
 173       FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
 174       FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
 175     } else {
 176       // Use all space for the non-method heap and set other heaps to minimal size
 177       FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
 178       FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
 179       FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
 180     }
 181   }
 182 
 183   // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
 184   if(!heap_available(CodeBlobType::MethodProfiled)) {
 185     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
 186     FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
 187   }
 188   // We do not need the non-profiled CodeHeap, use all space for the non-method CodeHeap
 189   if(!heap_available(CodeBlobType::MethodNonProfiled)) {
 190     FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + NonProfiledCodeHeapSize);
 191     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
 192   }
 193 
 194   // Make sure we have enough space for VM internal code
 195   uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
 196   if (NonMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
 197     vm_exit_during_initialization("Not enough space in non-method code heap to run VM.");
 198   }
 199   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
 200 
 201   // Align reserved sizes of CodeHeaps
 202   size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
 203   size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
 204   size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
 205 
 206   // Compute initial sizes of CodeHeaps
 207   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
 208   size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
 209   size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
 210 
 211   // Reserve one continuous chunk of memory for CodeHeaps and split it into
 212   // parts for the individual heaps. The memory layout looks like this:
 213   // ---------- high -----------
 214   //    Non-profiled nmethods
 215   //      Profiled nmethods
 216   //         Non-methods
 217   // ---------- low ------------
 218   ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
 219   ReservedSpace non_method_space    = rs.first_part(non_method_size);
 220   ReservedSpace rest                = rs.last_part(non_method_size);
     // 'rest' is split at offset profiled_size: the low part becomes the
     // profiled heap, the high part the non-profiled heap (matches sketch above).
 221   ReservedSpace profiled_space      = rest.first_part(profiled_size);
 222   ReservedSpace non_profiled_space  = rest.last_part(profiled_size);
 223 
 224   // Non-methods (stubs, adapters, ...)
 225   add_heap(non_method_space, "Code Heap 'non-methods'", init_non_method_size, CodeBlobType::NonMethod);
 226   // Tier 2 and tier 3 (profiled) methods
 227   add_heap(profiled_space, "Code Heap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
 228   // Tier 1 and tier 4 (non-profiled) methods and native methods
 229   add_heap(non_profiled_space, "Code Heap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
 230 }
 231 
 232 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
     // Reserves one contiguous executable memory region of at least 'size'
     // bytes (rounded up to the chosen page/allocation alignment) and records
     // the overall code cache bounds in _low_bound/_high_bound.
 233   // Determine alignment
 234   const size_t page_size = os::can_execute_large_page_memory() ?
 235           MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
 236                os::page_size_for_region(size, 8)) :
 237           os::vm_page_size();
 238   const size_t granularity = os::vm_allocation_granularity();
 239   const size_t r_align = MAX2(page_size, granularity);
 240   const size_t r_size = align_size_up(size, r_align);
     // rs_align == 0 means no special base alignment is requested; a non-zero
     // alignment (and large-page flag) is used only when page_size exceeds the
     // default vm_page_size().
 241   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
 242     MAX2(page_size, granularity);
 243 
 244   ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
 245 
 246   // Initialize bounds
 247   _low_bound = (address)rs.base();
 248   _high_bound = _low_bound + rs.size();
 249 
 250   return rs;
 251 }
 252 
 253 bool CodeCache::heap_available(int code_blob_type) {
     // Returns whether a CodeHeap of the given CodeBlobType is needed for the
     // current VM configuration (segmentation, execution mode and tiering).
 254   if (!SegmentedCodeCache) {
 255     // No segmentation: use a single code heap
 256     return (code_blob_type == CodeBlobType::All);
 257   } else if ((Arguments::mode() == Arguments::_int) ||
 258              (TieredStopAtLevel == CompLevel_none)) {
 259     // Interpreter only: we don't need any method code heaps
 260     return (code_blob_type == CodeBlobType::NonMethod);
 261   } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
 262     // Tiered compilation: use all code heaps
     // Relies on all concrete blob types being enumerated before CodeBlobType::All.
 263     return (code_blob_type < CodeBlobType::All);
 264   } else {
 265     // No TieredCompilation: we only need the non-method and non-profiled code heap
 266     return (code_blob_type == CodeBlobType::NonMethod) ||
 267            (code_blob_type == CodeBlobType::MethodNonProfiled);
 268   }
 269 }
 270 
 271 void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
 272   // Check if heap is needed
 273   if (!heap_available(code_blob_type)) {
 274     return;
 275   }
 276 
 277   // Create CodeHeap
 278   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 279   _heaps->append(heap);
 280 
 281   // Reserve Space
 282   size_initial = round_to(size_initial, os::vm_page_size());
 283 
 284   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 285     vm_exit_during_initialization("Could not reserve enough space for code cache");
 286   }


 338   // already done so, we are going to lose bigtime, since the code
 339   // cache will contain a garbage CodeBlob until the caller can
 340   // run the constructor for the CodeBlob subclass he is busy
 341   // instantiating.
 342   assert_locked_or_safepoint(CodeCache_lock);
 343   assert(size > 0, "allocation request must be reasonable");
 344   if (size <= 0) {
 345     return NULL;
 346   }
 347   CodeBlob* cb = NULL;
 348 
 349   // Get CodeHeap for the given CodeBlobType
 350   CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
 351   assert (heap != NULL, "heap is null");
 352 
 353   while (true) {
 354     cb = (CodeBlob*)heap->allocate(size, is_critical);
 355     if (cb != NULL) break;
 356     if (!heap->expand_by(CodeCacheExpansionSize)) {
 357       // Expansion failed
 358       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonMethod)) {
 359         // Fallback solution: Store non-method code in the non-profiled code heap
 360         return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
 361       }
 362       return NULL;
 363     }
 364     if (PrintCodeCacheExtension) {
 365       ResourceMark rm;
 366       if (SegmentedCodeCache) {
 367         tty->print("%s", heap->name());
 368       } else {
 369         tty->print("Code Cache");
 370       }
 371       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 372                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 373                     (address)heap->high() - (address)heap->low_boundary());
 374     }
 375   }
 376   print_trace("allocation", cb, size);
 377   _number_of_blobs++;
 378   return cb;
 379 }
 380 
 381 void CodeCache::free(CodeBlob* cb) {
 382   assert_locked_or_safepoint(CodeCache_lock);
 383 
 384   print_trace("free", cb);
 385   if (cb->is_nmethod()) {
 386     _number_of_nmethods--;
 387     if (((nmethod *)cb)->has_dependencies()) {
 388       _number_of_nmethods_with_dependencies--;
 389     }


 803 
 804 void icache_init();
 805 
 806 void CodeCache::initialize() {
     // One-time code cache setup: checks CodeCacheSegmentSize invariants,
     // creates either the segmented CodeHeaps or one heap spanning the whole
     // cache, then initializes ICache flushing and registers the code area
     // with the OS.
 807   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
 808 #ifdef COMPILER2
 809   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
 810 #endif
 811   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
 812   // This was originally just a check of the alignment, causing failure, instead, round
 813   // the code cache to the page size.  In particular, Solaris is moving to a larger
 814   // default page size.
 815   CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
 816 
 817   if (SegmentedCodeCache) {
 818     // Use multiple code heaps
 819     initialize_heaps();
 820   } else {
 821     // Use a single code heap
 822     ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
 823     add_heap(rs, "Code Cache", InitialCodeCacheSize, CodeBlobType::All);
 824   }
 825 
 826   // Initialize ICache flush mechanism
 827   // This service is needed for os::register_code_area
 828   icache_init();
 829 
 830   // Give OS a chance to register generated code area.
 831   // This is used on Windows 64 bit platforms to register
 832   // Structured Exception Handlers for our generated code.
 833   os::register_code_area((char*)low_bound(), (char*)high_bound());
 834 }
 835 
 836 void codeCache_init() {
     // Global VM startup hook; delegates to CodeCache::initialize().
 837   CodeCache::initialize();
 838 }
 839 
 840 //------------------------------------------------------------------------------------------------
 841 
 842 int CodeCache::number_of_nmethods_with_dependencies() {
 843   return _number_of_nmethods_with_dependencies;


1224         }
1225       }
1226     }
1227     tty->print_cr("OopMaps");
1228     tty->print_cr("  #blobs    = %d", number_of_blobs);
1229     tty->print_cr("  code size = %d", code_size);
1230     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1231     tty->print_cr("  map size  = %d", map_size);
1232   }
1233 
1234 #endif // !PRODUCT
1235 }
1236 
1237 void CodeCache::print_summary(outputStream* st, bool detailed) {
1238   FOR_ALL_HEAPS(heap_iterator) {
1239     CodeHeap* heap = (*heap_iterator);
1240     size_t total = (heap->high_boundary() - heap->low_boundary());
1241     if (SegmentedCodeCache) {
1242       st->print("%s:", heap->name());
1243     } else {
1244       st->print("Code Cache:");
1245     }
1246     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1247                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1248                  total/K, (total - heap->unallocated_capacity())/K,
1249                  heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1250 
1251     if (detailed) {
1252       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1253                    p2i(heap->low_boundary()),
1254                    p2i(heap->high()),
1255                    p2i(heap->high_boundary()));
1256     }
1257   }
1258 
1259   if (detailed) {
1260     st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1261                        " adapters=" UINT32_FORMAT,
1262                        nof_blobs(), nof_nmethods(), nof_adapters());
1263     st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1264                  "enabled" : Arguments::mode() == Arguments::_int ?




 141 
 142 // Initialize array of CodeHeaps
     // Eagerly allocated C-heap-resident array holding one CodeHeap* per
     // available heap; entries are appended by add_heap() during startup.
 143 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
 144 
 145 void CodeCache::initialize_heaps() {
     // Sizes the three CodeHeaps (non-nmethod, profiled, non-profiled) from the
     // *CodeHeapSize flags, reserves one contiguous code space and registers the
     // individual heaps via add_heap(). Only called for a segmented code cache
     // (see CodeCache::initialize()). Exits the VM if the non-nmethod heap would
     // be too small to hold VM internal code.
 146   // Determine size of compiler buffers
 147   size_t code_buffers_size = 0;
 148 #ifdef COMPILER1
 149   // C1 temporary code buffers (see Compiler::init_buffer_blob())
 150   const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
 151   code_buffers_size += c1_count * Compiler::code_buffer_size();
 152 #endif
 153 #ifdef COMPILER2
 154   // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
 155   const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
 156   // Initial size of constant table (this may be increased if a compiled method needs more space)
 157   code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
 158 #endif
 159 
 160   // Calculate default CodeHeap sizes if not set by user
     // Defaults are only computed when none of the three heap sizes was given
     // on the command line; a single user-set flag disables all ergonomics here.
 161   if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
 162       && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
 163     // Increase default NonNMethodCodeHeapSize to account for compiler buffers
 164     FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);
 165 
 166     // Check if we have enough space for the non-nmethod code heap
 167     if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
 168       // Use the default value for NonNMethodCodeHeapSize and one half of the
 169       // remaining size for non-profiled methods and one half for profiled methods
 170       size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
 171       size_t profiled_size = remaining_size / 2;
 172       size_t non_profiled_size = remaining_size - profiled_size;
 173       FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
 174       FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
 175     } else {
 176       // Use all space for the non-nmethod heap and set other heaps to minimal size
 177       FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
 178       FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
 179       FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
 180     }
 181   }
 182 
 183   // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
 184   if(!heap_available(CodeBlobType::MethodProfiled)) {
 185     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
 186     FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
 187   }
 188   // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
 189   if(!heap_available(CodeBlobType::MethodNonProfiled)) {
 190     FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
 191     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
 192   }
 193 
 194   // Make sure we have enough space for VM internal code
 195   uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
 196   if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
 197     vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
 198   }
 199   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
 200 
 201   // Align reserved sizes of CodeHeaps
 202   size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
 203   size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
 204   size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
 205 
 206   // Compute initial sizes of CodeHeaps
 207   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
 208   size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
 209   size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
 210 
 211   // Reserve one continuous chunk of memory for CodeHeaps and split it into
 212   // parts for the individual heaps. The memory layout looks like this:
 213   // ---------- high -----------
 214   //    Non-profiled nmethods
 215   //      Profiled nmethods
 216   //         Non-nmethods
 217   // ---------- low ------------
 218   ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
 219   ReservedSpace non_method_space    = rs.first_part(non_method_size);
 220   ReservedSpace rest                = rs.last_part(non_method_size);
     // 'rest' is split at offset profiled_size: the low part becomes the
     // profiled heap, the high part the non-profiled heap (matches sketch above).
 221   ReservedSpace profiled_space      = rest.first_part(profiled_size);
 222   ReservedSpace non_profiled_space  = rest.last_part(profiled_size);
 223 
 224   // Non-nmethods (stubs, adapters, ...)
 225   add_heap(non_method_space, "CodeHeap 'non-nmethods'", init_non_method_size, CodeBlobType::NonNMethod);
 226   // Tier 2 and tier 3 (profiled) methods
 227   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
 228   // Tier 1 and tier 4 (non-profiled) methods and native methods
 229   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
 230 }
 231 
 232 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
     // Reserves one contiguous executable memory region of at least 'size'
     // bytes (rounded up to the chosen page/allocation alignment) and records
     // the overall code cache bounds in _low_bound/_high_bound.
 233   // Determine alignment
 234   const size_t page_size = os::can_execute_large_page_memory() ?
 235           MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
 236                os::page_size_for_region(size, 8)) :
 237           os::vm_page_size();
 238   const size_t granularity = os::vm_allocation_granularity();
 239   const size_t r_align = MAX2(page_size, granularity);
 240   const size_t r_size = align_size_up(size, r_align);
     // rs_align == 0 means no special base alignment is requested; a non-zero
     // alignment (and large-page flag) is used only when page_size exceeds the
     // default vm_page_size().
 241   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
 242     MAX2(page_size, granularity);
 243 
 244   ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
 245 
 246   // Initialize bounds
 247   _low_bound = (address)rs.base();
 248   _high_bound = _low_bound + rs.size();
 249 
 250   return rs;
 251 }
 252 
 253 bool CodeCache::heap_available(int code_blob_type) {
     // Returns whether a CodeHeap of the given CodeBlobType is needed for the
     // current VM configuration (segmentation, execution mode and tiering).
 254   if (!SegmentedCodeCache) {
 255     // No segmentation: use a single code heap
 256     return (code_blob_type == CodeBlobType::All);
 257   } else if ((Arguments::mode() == Arguments::_int) ||
 258              (TieredStopAtLevel == CompLevel_none)) {
 259     // Interpreter only: we don't need any method code heaps
 260     return (code_blob_type == CodeBlobType::NonNMethod);
 261   } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
 262     // Tiered compilation: use all code heaps
     // Relies on all concrete blob types being enumerated before CodeBlobType::All.
 263     return (code_blob_type < CodeBlobType::All);
 264   } else {
 265     // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
 266     return (code_blob_type == CodeBlobType::NonNMethod) ||
 267            (code_blob_type == CodeBlobType::MethodNonProfiled);
 268   }
 269 }
 270 
 271 void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
 272   // Check if heap is needed
 273   if (!heap_available(code_blob_type)) {
 274     return;
 275   }
 276 
 277   // Create CodeHeap
 278   CodeHeap* heap = new CodeHeap(name, code_blob_type);
 279   _heaps->append(heap);
 280 
 281   // Reserve Space
 282   size_initial = round_to(size_initial, os::vm_page_size());
 283 
 284   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
 285     vm_exit_during_initialization("Could not reserve enough space for code cache");
 286   }


 338   // already done so, we are going to lose bigtime, since the code
 339   // cache will contain a garbage CodeBlob until the caller can
 340   // run the constructor for the CodeBlob subclass he is busy
 341   // instantiating.
 342   assert_locked_or_safepoint(CodeCache_lock);
 343   assert(size > 0, "allocation request must be reasonable");
 344   if (size <= 0) {
 345     return NULL;
 346   }
 347   CodeBlob* cb = NULL;
 348 
 349   // Get CodeHeap for the given CodeBlobType
 350   CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
 351   assert (heap != NULL, "heap is null");
 352 
 353   while (true) {
 354     cb = (CodeBlob*)heap->allocate(size, is_critical);
 355     if (cb != NULL) break;
 356     if (!heap->expand_by(CodeCacheExpansionSize)) {
 357       // Expansion failed
 358       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
 359         // Fallback solution: Store non-nmethod code in the non-profiled code heap
 360         return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
 361       }
 362       return NULL;
 363     }
 364     if (PrintCodeCacheExtension) {
 365       ResourceMark rm;
 366       if (SegmentedCodeCache) {
 367         tty->print("%s", heap->name());
 368       } else {
 369         tty->print("CodeCache");
 370       }
 371       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 372                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 373                     (address)heap->high() - (address)heap->low_boundary());
 374     }
 375   }
 376   print_trace("allocation", cb, size);
 377   _number_of_blobs++;
 378   return cb;
 379 }
 380 
 381 void CodeCache::free(CodeBlob* cb) {
 382   assert_locked_or_safepoint(CodeCache_lock);
 383 
 384   print_trace("free", cb);
 385   if (cb->is_nmethod()) {
 386     _number_of_nmethods--;
 387     if (((nmethod *)cb)->has_dependencies()) {
 388       _number_of_nmethods_with_dependencies--;
 389     }


 803 
 804 void icache_init();
 805 
 806 void CodeCache::initialize() {
     // One-time code cache setup: checks CodeCacheSegmentSize invariants,
     // creates either the segmented CodeHeaps or one heap spanning the whole
     // cache, then initializes ICache flushing and registers the code area
     // with the OS.
 807   assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
 808 #ifdef COMPILER2
 809   assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
 810 #endif
 811   assert(CodeCacheSegmentSize >= sizeof(jdouble),    "CodeCacheSegmentSize must be large enough to align constants");
 812   // This was originally just a check of the alignment, causing failure, instead, round
 813   // the code cache to the page size.  In particular, Solaris is moving to a larger
 814   // default page size.
 815   CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
 816 
 817   if (SegmentedCodeCache) {
 818     // Use multiple code heaps
 819     initialize_heaps();
 820   } else {
 821     // Use a single code heap
 822     ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
 823     add_heap(rs, "CodeCache", InitialCodeCacheSize, CodeBlobType::All);
 824   }
 825 
 826   // Initialize ICache flush mechanism
 827   // This service is needed for os::register_code_area
 828   icache_init();
 829 
 830   // Give OS a chance to register generated code area.
 831   // This is used on Windows 64 bit platforms to register
 832   // Structured Exception Handlers for our generated code.
 833   os::register_code_area((char*)low_bound(), (char*)high_bound());
 834 }
 835 
 836 void codeCache_init() {
     // Global VM startup hook; delegates to CodeCache::initialize().
 837   CodeCache::initialize();
 838 }
 839 
 840 //------------------------------------------------------------------------------------------------
 841 
 842 int CodeCache::number_of_nmethods_with_dependencies() {
 843   return _number_of_nmethods_with_dependencies;


1224         }
1225       }
1226     }
1227     tty->print_cr("OopMaps");
1228     tty->print_cr("  #blobs    = %d", number_of_blobs);
1229     tty->print_cr("  code size = %d", code_size);
1230     tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
1231     tty->print_cr("  map size  = %d", map_size);
1232   }
1233 
1234 #endif // !PRODUCT
1235 }
1236 
1237 void CodeCache::print_summary(outputStream* st, bool detailed) {
1238   FOR_ALL_HEAPS(heap_iterator) {
1239     CodeHeap* heap = (*heap_iterator);
1240     size_t total = (heap->high_boundary() - heap->low_boundary());
1241     if (SegmentedCodeCache) {
1242       st->print("%s:", heap->name());
1243     } else {
1244       st->print("CodeCache:");
1245     }
1246     st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1247                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1248                  total/K, (total - heap->unallocated_capacity())/K,
1249                  heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1250 
1251     if (detailed) {
1252       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1253                    p2i(heap->low_boundary()),
1254                    p2i(heap->high()),
1255                    p2i(heap->high_boundary()));
1256     }
1257   }
1258 
1259   if (detailed) {
1260     st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1261                        " adapters=" UINT32_FORMAT,
1262                        nof_blobs(), nof_nmethods(), nof_adapters());
1263     st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1264                  "enabled" : Arguments::mode() == Arguments::_int ?


src/share/vm/code/codeCache.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File