src/share/vm/code/codeCache.cpp

Old version:

  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "compiler/compileBroker.hpp"
  34 #include "gc_implementation/shared/markSweep.hpp"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/gcLocker.hpp"
  37 #include "memory/iterator.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/method.hpp"
  40 #include "oops/objArrayOop.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/icache.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutexLocker.hpp"
  47 #include "runtime/compilationPolicy.hpp"
  48 #include "services/memoryService.hpp"
  49 #include "trace/tracing.hpp"
  50 #include "utilities/xmlstream.hpp"
  51 #ifdef COMPILER1
  52 #include "c1/c1_Compilation.hpp"
  53 #include "c1/c1_Compiler.hpp"
  54 #endif
  55 #ifdef COMPILER2
  56 #include "opto/c2compiler.hpp"
  57 #include "opto/compile.hpp"
  58 #include "opto/node.hpp"
  59 #endif
  60 
  61 // Helper class for printing in CodeCache
  62 class CodeBlob_sizes {
  63  private:
  64   int count;
  65   int total_size;
  66   int header_size;


 175     } else {
 176       // Use all space for the non-nmethod heap and set other heaps to minimal size
 177       FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
 178       FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
 179       FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
 180     }
 181   }
 182 
 183   // We do not need the profiled CodeHeap; use all space for the non-profiled CodeHeap
 184   if (!heap_available(CodeBlobType::MethodProfiled)) {
 185     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
 186     FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
 187   }
 188   // We do not need the non-profiled CodeHeap; use all space for the non-nmethod CodeHeap
 189   if (!heap_available(CodeBlobType::MethodNonProfiled)) {
 190     FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
 191     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
 192   }
 193 
 194   // Make sure we have enough space for VM internal code
 195   uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
 196   if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
 197     vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
 198   }
 199   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
 200 
 201   // Align reserved sizes of CodeHeaps
 202   size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
 203   size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
 204   size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
 205 
 206   // Compute initial sizes of CodeHeaps
 207   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
 208   size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
 209   size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
 210 
 211   // Reserve one contiguous chunk of memory for CodeHeaps and split it into
 212   // parts for the individual heaps. The memory layout looks like this:
 213   // ---------- high -----------
 214   //    Non-profiled nmethods
 215   //      Profiled nmethods
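
The redistribution above is plain flag arithmetic: when a CodeHeap is unavailable, its budget is folded into the next heap in line and the vacated flag is zeroed. A minimal standalone sketch of the same logic, using hypothetical sizes and availability flags in place of the real VM flags and FLAG_SET_ERGO:

    #include <cstddef>

    const size_t M = 1024 * 1024;

    // Sketch of the size-redistribution logic above (hypothetical names;
    // the real code operates on NonNMethodCodeHeapSize and friends).
    void redistribute(size_t& non_nmethod, size_t& profiled, size_t& non_profiled,
                      bool profiled_available, bool non_profiled_available) {
      if (!profiled_available) {        // fold the profiled budget into non-profiled
        non_profiled += profiled;
        profiled = 0;
      }
      if (!non_profiled_available) {    // fold the non-profiled budget into non-nmethod
        non_nmethod += non_profiled;
        non_profiled = 0;
      }
    }

    // E.g. starting from 2 MB / 118 MB / 120 MB with the profiled heap
    // unavailable yields 2 MB / 0 / 238 MB.
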


 316 }
 317 
 318 CodeBlob* CodeCache::first_blob(int code_blob_type) {
 319   if (heap_available(code_blob_type)) {
 320     return first_blob(get_code_heap(code_blob_type));
 321   } else {
 322     return NULL;
 323   }
 324 }
 325 
 326 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 327   assert_locked_or_safepoint(CodeCache_lock);
 328   assert(heap != NULL, "heap is null");
 329   return (CodeBlob*)heap->next(cb);
 330 }
 331 
 332 CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
 333   return next_blob(get_code_heap(cb), cb);
 334 }
 335 
 336 CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
 337   // Do not seize the CodeCache lock here: if the caller has not
 338   // already done so, we are going to lose badly, since the code
 339   // cache would contain a garbage CodeBlob until the caller can
 340   // run the constructor for the CodeBlob subclass it is
 341   // instantiating.
 342   assert_locked_or_safepoint(CodeCache_lock);
 343   assert(size > 0, "allocation request must be reasonable");
 344   if (size <= 0) {
 345     return NULL;
 346   }
 347   CodeBlob* cb = NULL;
 348 
 349   // Get CodeHeap for the given CodeBlobType
 350   CodeHeap* heap = get_code_heap(code_blob_type);
 351   assert(heap != NULL, "heap is null");
 352 
 353   while (true) {
 354     cb = (CodeBlob*)heap->allocate(size, is_critical);
 355     if (cb != NULL) break;
 356     if (!heap->expand_by(CodeCacheExpansionSize)) {
 357       // Expansion failed
 358       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
 359         // Fallback solution: Store non-nmethod code in the non-profiled code heap
 360         return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
 361       }
 362       return NULL;
 363     }
 364     if (PrintCodeCacheExtension) {
 365       ResourceMark rm;
 366       if (SegmentedCodeCache) {
 367         tty->print("%s", heap->name());
 368       } else {
 369         tty->print("CodeCache");
 370       }
 371       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 372                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 373                     (address)heap->high() - (address)heap->low_boundary());
 374     }
 375   }
 376   print_trace("allocation", cb, size);
 377   _number_of_blobs++;
 378   return cb;
 379 }
 380 
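
The allocation loop above follows a grow-on-demand pattern: try the heap's free list first, expand the heap by CodeCacheExpansionSize on failure, and retry until either the allocation succeeds or the heap refuses to grow. A self-contained sketch of that control flow, with a hypothetical Heap interface standing in for CodeHeap:

    #include <cstddef>

    // Hypothetical stand-in for CodeHeap; not the real API.
    struct Heap {
      virtual void* allocate(size_t size) = 0;     // returns NULL when the free list is exhausted
      virtual bool  expand_by(size_t bytes) = 0;   // returns false when the heap cannot grow
      virtual ~Heap() {}
    };

    // Expand-and-retry loop, mirroring the structure of CodeCache::allocate.
    void* allocate_with_expansion(Heap* heap, size_t size, size_t expansion_step) {
      while (true) {
        void* p = heap->allocate(size);
        if (p != NULL) return p;                   // success: the free list had room
        if (!heap->expand_by(expansion_step)) {
          return NULL;                             // expansion failed; caller handles it
        }
        // the heap grew by expansion_step; retry the allocation
      }
    }
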


 739   return (heap != NULL) ? heap->unallocated_capacity() : 0;
 740 }
 741 
 742 size_t CodeCache::unallocated_capacity() {
 743   size_t unallocated_cap = 0;
 744   FOR_ALL_HEAPS(heap) {
 745     unallocated_cap += (*heap)->unallocated_capacity();
 746   }
 747   return unallocated_cap;
 748 }
 749 
 750 size_t CodeCache::max_capacity() {
 751   size_t max_cap = 0;
 752   FOR_ALL_HEAPS(heap) {
 753     max_cap += (*heap)->max_capacity();
 754   }
 755   return max_cap;
 756 }
 757 
 758 /**
 759  * Returns true if a CodeHeap is full and sets *code_blob_type to the type of that heap.
 760  */
 761 bool CodeCache::is_full(int* code_blob_type) {
 762   FOR_ALL_HEAPS(heap) {
 763     if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
 764       *code_blob_type = (*heap)->code_blob_type();
 765       return true;
 766     }
 767   }
 768   return false;
 769 }
 770 
 771 /**
 772  * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 773  * is free, reverse_free_ratio() returns 4.
 774  */
 775 double CodeCache::reverse_free_ratio(int code_blob_type) {
 776   CodeHeap* heap = get_code_heap(code_blob_type);
 777   if (heap == NULL) {
 778     return 0;
 779   }
 780   double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
 781   double max_capacity = (double)heap->max_capacity();
 782   return max_capacity / unallocated_capacity;
 783 }
 784 
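
A worked instance of the 25% example from the comment, with hypothetical sizes:

    // Hypothetical sizes for the per-heap formula above:
    //   max_capacity          = 64 MB
    //   unallocated_capacity  = 16 MB (25% free, after subtracting CodeCacheMinimumFreeSpace)
    //   reverse_free_ratio    = 64 MB / 16 MB = 4
    // The less free space remains, the larger the ratio grows, so callers can
    // scale their behavior up as the heap approaches exhaustion.
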
 785 size_t CodeCache::bytes_allocated_in_freelists() {
 786   size_t allocated_bytes = 0;
 787   FOR_ALL_HEAPS(heap) {
 788     allocated_bytes += (*heap)->allocated_in_freelist();
 789   }
 790   return allocated_bytes;
 791 }
 792 
 793 int CodeCache::allocated_segments() {
 794   int number_of_segments = 0;
 795   FOR_ALL_HEAPS(heap) {
 796     number_of_segments += (*heap)->allocated_segments();
 797   }
 798   return number_of_segments;
 799 }
 800 

New version:

  27 #include "code/codeCache.hpp"
  28 #include "code/compiledIC.hpp"
  29 #include "code/dependencies.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "compiler/compileBroker.hpp"
  34 #include "gc_implementation/shared/markSweep.hpp"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/gcLocker.hpp"
  37 #include "memory/iterator.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/method.hpp"
  40 #include "oops/objArrayOop.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/icache.hpp"
  45 #include "runtime/java.hpp"
  46 #include "runtime/mutexLocker.hpp"
  47 #include "runtime/sweeper.hpp"
  48 #include "runtime/compilationPolicy.hpp"
  49 #include "services/memoryService.hpp"
  50 #include "trace/tracing.hpp"
  51 #include "utilities/xmlstream.hpp"
  52 #ifdef COMPILER1
  53 #include "c1/c1_Compilation.hpp"
  54 #include "c1/c1_Compiler.hpp"
  55 #endif
  56 #ifdef COMPILER2
  57 #include "opto/c2compiler.hpp"
  58 #include "opto/compile.hpp"
  59 #include "opto/node.hpp"
  60 #endif
  61 
  62 // Helper class for printing in CodeCache
  63 class CodeBlob_sizes {
  64  private:
  65   int count;
  66   int total_size;
  67   int header_size;


 176     } else {
 177       // Use all space for the non-nmethod heap and set other heaps to minimal size
 178       FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
 179       FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
 180       FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
 181     }
 182   }
 183 
 184   // We do not need the profiled CodeHeap; use all space for the non-profiled CodeHeap
 185   if (!heap_available(CodeBlobType::MethodProfiled)) {
 186     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
 187     FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
 188   }
 189   // We do not need the non-profiled CodeHeap; use all space for the non-nmethod CodeHeap
 190   if (!heap_available(CodeBlobType::MethodNonProfiled)) {
 191     FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
 192     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
 193   }
 194 
 195   // Make sure we have enough space for VM internal code
 196   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
 197   if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
 198     vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
 199   }
 200   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
 201 
 202   // Align reserved sizes of CodeHeaps
 203   size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
 204   size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
 205   size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
 206 
 207   // Compute initial sizes of CodeHeaps
 208   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
 209   size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
 210   size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
 211 
 212   // Reserve one contiguous chunk of memory for CodeHeaps and split it into
 213   // parts for the individual heaps. The memory layout looks like this:
 214   // ---------- high -----------
 215   //    Non-profiled nmethods
 216   //      Profiled nmethods


 317 }
 318 
 319 CodeBlob* CodeCache::first_blob(int code_blob_type) {
 320   if (heap_available(code_blob_type)) {
 321     return first_blob(get_code_heap(code_blob_type));
 322   } else {
 323     return NULL;
 324   }
 325 }
 326 
 327 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
 328   assert_locked_or_safepoint(CodeCache_lock);
 329   assert(heap != NULL, "heap is null");
 330   return (CodeBlob*)heap->next(cb);
 331 }
 332 
 333 CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
 334   return next_blob(get_code_heap(cb), cb);
 335 }
 336 
 337 /**
 338  * Do not seize the CodeCache lock here: if the caller has not
 339  * already done so, we are going to lose badly, since the code
 340  * cache would contain a garbage CodeBlob until the caller can
 341  * run the constructor for the CodeBlob subclass it is
 342  * instantiating.
 343  */
 344 CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
 345   NMethodSweeper::notify();
 346   assert_locked_or_safepoint(CodeCache_lock);
 347   assert(size > 0, "Code cache allocation request must be > 0");
 348   if (size <= 0) {
 349     return NULL;
 350   }
 351   CodeBlob* cb = NULL;
 352 
 353   // Get CodeHeap for the given CodeBlobType
 354   CodeHeap* heap = get_code_heap(code_blob_type);
 355   assert(heap != NULL, "heap is null");
 356 
 357   while (true) {
 358     cb = (CodeBlob*)heap->allocate(size);
 359     if (cb != NULL) break;
 360     if (!heap->expand_by(CodeCacheExpansionSize)) {
 361       MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 362       CompileBroker::handle_full_code_cache(code_blob_type);
 363 
 364       // Expansion failed
 365       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
 366         // Fallback solution: Store non-nmethod code in the non-profiled code heap
 367         return allocate(size, CodeBlobType::MethodNonProfiled);
 368       }
 369       return NULL;
 370     }
 371     if (PrintCodeCacheExtension) {
 372       ResourceMark rm;
 373       if (SegmentedCodeCache) {
 374         tty->print("%s", heap->name());
 375       } else {
 376         tty->print("CodeCache");
 377       }
 378       tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
 379                     (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
 380                     (address)heap->high() - (address)heap->low_boundary());
 381     }
 382   }
 383   print_trace("allocation", cb, size);
 384   _number_of_blobs++;
 385   return cb;
 386 }
 387 
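
Relative to the old version, a failed expansion now releases the CodeCache lock, notifies the compile broker that the cache is full, and only then falls back or gives up. A sketch of that failure path, with hypothetical helper names (expand_and_allocate is the retry loop sketched earlier; notify_code_cache_full stands in for CompileBroker::handle_full_code_cache):

    #include <cstddef>

    struct Heap;                                        // as in the earlier sketch
    void* expand_and_allocate(Heap* heap, size_t size); // the retry loop sketched earlier
    void  notify_code_cache_full();                     // hypothetical notification hook

    // One-shot spill-over: on failure, notify and optionally retry in a fallback
    // heap, mirroring how non-nmethod code may spill into the non-profiled heap.
    void* allocate_or_spill(Heap* heap, Heap* fallback, size_t size) {
      void* p = expand_and_allocate(heap, size);
      if (p == NULL) {
        notify_code_cache_full();
        if (fallback != NULL) {
          return allocate_or_spill(fallback, NULL, size);  // NULL fallback: no second spill
        }
      }
      return p;
    }
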


 746   return (heap != NULL) ? heap->unallocated_capacity() : 0;
 747 }
 748 
 749 size_t CodeCache::unallocated_capacity() {
 750   size_t unallocated_cap = 0;
 751   FOR_ALL_HEAPS(heap) {
 752     unallocated_cap += (*heap)->unallocated_capacity();
 753   }
 754   return unallocated_cap;
 755 }
 756 
 757 size_t CodeCache::max_capacity() {
 758   size_t max_cap = 0;
 759   FOR_ALL_HEAPS(heap) {
 760     max_cap += (*heap)->max_capacity();
 761   }
 762   return max_cap;
 763 }
 764 
 765 /**
 766  * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 767  * is free, reverse_free_ratio() returns 4.
 768  */
 769 double CodeCache::reverse_free_ratio(int code_blob_type) {
 770   CodeHeap* heap = get_code_heap(code_blob_type);
 771   if (heap == NULL) {
 772     return 0;
 773   }
 774   double unallocated_capacity = (double)CodeCache::unallocated_capacity() + 1; // Avoid division by 0
 775   double max_capacity = (double)heap->max_capacity();
 776   return max_capacity / unallocated_capacity;
 777 }
 778 
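
Note the changed denominator: the old code divided by the heap's own free space less CodeCacheMinimumFreeSpace (which could reach zero), while the new code divides by the cache-wide free space plus one byte, so the result stays well defined even for a completely full cache. With hypothetical sizes:

    //   heap->max_capacity()                  = 64 MB
    //   CodeCache::unallocated_capacity() + 1 = 16 MB + 1 byte
    //   reverse_free_ratio                    = 64 MB / (16 MB + 1 B), approximately 4.0
    // The +1 only matters when the cache is exhausted, where it prevents
    // a division by zero.
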
 779 size_t CodeCache::bytes_allocated_in_freelists() {
 780   size_t allocated_bytes = 0;
 781   FOR_ALL_HEAPS(heap) {
 782     allocated_bytes += (*heap)->allocated_in_freelist();
 783   }
 784   return allocated_bytes;
 785 }
 786 
 787 int CodeCache::allocated_segments() {
 788   int number_of_segments = 0;
 789   FOR_ALL_HEAPS(heap) {
 790     number_of_segments += (*heap)->allocated_segments();
 791   }
 792   return number_of_segments;
 793 }
 794 


src/share/vm/code/codeCache.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File