src/share/vm/code/codeCache.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File 8029799 Sdiff src/share/vm/code

src/share/vm/code/codeCache.cpp

Print this page




 181   guarantee(size >= 0, "allocation request must be reasonable");
 182   assert_locked_or_safepoint(CodeCache_lock);
 183   CodeBlob* cb = NULL;
 184   _number_of_blobs++;
 185   while (true) {
 186     cb = (CodeBlob*)_heap->allocate(size, is_critical);
 187     if (cb != NULL) break;
 188     if (!_heap->expand_by(CodeCacheExpansionSize)) {
 189       // Expansion failed
 190       return NULL;
 191     }
 192     if (PrintCodeCacheExtension) {
 193       ResourceMark rm;
 194       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
 195                     (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
 196                     (address)_heap->high() - (address)_heap->low_boundary());
 197     }
 198   }
 199   maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
 200                           (address)_heap->low_boundary()) - unallocated_capacity());
 201   verify_if_often();
 202   print_trace("allocation", cb, size);
 203   return cb;
 204 }
 205 
 // Free a CodeBlob: update the blob/nmethod/adapter bookkeeping counters and
 // return its storage to the underlying code heap. Caller must hold
 // CodeCache_lock or be at a safepoint.
 206 void CodeCache::free(CodeBlob* cb) {
 207   assert_locked_or_safepoint(CodeCache_lock);
 208   verify_if_often();  // optional code-heap verification (non-product only)
 209 
 210   print_trace("free", cb);
 211   if (cb->is_nmethod()) {
 212     _number_of_nmethods--;
 213     if (((nmethod *)cb)->has_dependencies()) {
 214       _number_of_nmethods_with_dependencies--;
 215     }
 216   }
 217   if (cb->is_adapter_blob()) {
 218     _number_of_adapters--;
 219   }
 220   _number_of_blobs--;
 221 
 222   _heap->deallocate(cb);  // give the storage back to the code heap
 223 
 224   verify_if_often();
 225   assert(_number_of_blobs >= 0, "sanity check");
 226 }
 227 
 228 
 // Register a newly constructed blob in the cache statistics and flush the
 // hardware instruction cache over its code range so the new code is visible
 // to execution.
 229 void CodeCache::commit(CodeBlob* cb) {
 230   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 231   assert_locked_or_safepoint(CodeCache_lock);
 232   if (cb->is_nmethod()) {
 233     _number_of_nmethods++;
 234     if (((nmethod *)cb)->has_dependencies()) {
 235       _number_of_nmethods_with_dependencies++;
 236     }
 237   }
 238   if (cb->is_adapter_blob()) {
 239     _number_of_adapters++;
 240   }
 241 
 242   // flush the hardware I-cache
 243   ICache::invalidate_range(cb->content_begin(), cb->content_size());
 244 }
 245 
 246 
 // Flushing the entire code cache is not supported; traps if ever called.
 247 void CodeCache::flush() {
 248   assert_locked_or_safepoint(CodeCache_lock);
 249   Unimplemented();
 250 }
 251 
 252 
 253 // Iteration over CodeBlobs
 254 
 255 #define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
 256 #define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
 257 #define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
 258 
 259 
 // Returns true iff 'p' lies within the code heap's address range.
 260 bool CodeCache::contains(void *p) {
 261   // It should be ok to call contains without holding a lock
 262   return _heap->contains(p);
 263 }
 264 
 265 
 266 // This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
 267 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
 268 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 269 CodeBlob* CodeCache::find_blob(void* start) {
 270   CodeBlob* result = find_blob_unsafe(start);
 271   if (result == NULL) return NULL;
 272   // We could potentially look up non_entrant methods
 273   guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
 274   return result;
 275 }
 276 
 // Look up the nmethod containing address 'start'; asserts that the
 // enclosing blob, if any, really is an nmethod.
 277 nmethod* CodeCache::find_nmethod(void* start) {
 278   CodeBlob *cb = find_blob(start);
 279   assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
 280   return (nmethod*)cb;
 281 }
 282 
 283 
 // Apply 'f' to every CodeBlob in the cache. Caller must hold
 // CodeCache_lock or be at a safepoint.
 284 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 285   assert_locked_or_safepoint(CodeCache_lock);
 286   FOR_ALL_BLOBS(p) {
 287     f(p);
 288   }
 289 }
 290 
 291 
 292 void CodeCache::nmethods_do(void f(nmethod* nm)) {


 727     p->verify();
 728   }
 729 }
 730 
 // Record that the code cache filled up: bump the fullness counter and, if
 // event tracing is enabled, emit an event describing current occupancy.
 731 void CodeCache::report_codemem_full() {
 732   _codemem_full_count++;
 733   EventCodeCacheFull event;
 734   if (event.should_commit()) {
 735     event.set_startAddress((u8)low_bound());
 736     event.set_commitedTopAddress((u8)high());
 737     event.set_reservedTopAddress((u8)high_bound());
 738     event.set_entryCount(nof_blobs());
 739     event.set_methodCount(nof_nmethods());
 740     event.set_adaptorCount(nof_adapters());
 741     event.set_unallocatedCapacity(unallocated_capacity()/K)
 742     event.set_fullCount(_codemem_full_count);
 743     event.commit();
 744   }
 745 }
 746 















 747 //------------------------------------------------------------------------------------------------
 748 // Non-product version
 749 
 750 #ifndef PRODUCT
 751 
 // Verify the code heap, but only when VerifyCodeCacheOften is set.
 752 void CodeCache::verify_if_often() {
 753   if (VerifyCodeCacheOften) {
 754     _heap->verify();
 755   }
 756 }
 757 
 // Trace a cache event (e.g. "allocation", "free") when PrintCodeCache2 is
 // set. A size of 0 means "use the blob's own size".
 758 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
 759   if (PrintCodeCache2) {  // Need to add a new flag
 760     ResourceMark rm;
 761     if (size == 0)  size = cb->size();
 762     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
 763   }
 764 }
 765 
 // Print a per-kind census of the code cache (nmethods by state, stubs,
 // adapters, buffer blobs) followed by a 512-byte-bucket size histogram of
 // java nmethods. Non-product only.
 766 void CodeCache::print_internals() {
 767   int nmethodCount = 0;
 768   int runtimeStubCount = 0;
 769   int adapterCount = 0;
 770   int deoptimizationStubCount = 0;
 771   int uncommonTrapStubCount = 0;
 772   int bufferBlobCount = 0;
 773   int total = 0;
 774   int nmethodAlive = 0;
 775   int nmethodNotEntrant = 0;
 776   int nmethodZombie = 0;
 777   int nmethodUnloaded = 0;
 778   int nmethodJava = 0;
 779   int nmethodNative = 0;
 780   int maxCodeSize = 0;
 781   ResourceMark rm;
 782 
 783   CodeBlob *cb;
     // First pass: classify every blob and record the largest java nmethod's
     // instruction size (used to size the histogram below).
 784   for (cb = first(); cb != NULL; cb = next(cb)) {
 785     total++;
 786     if (cb->is_nmethod()) {
 787       nmethod* nm = (nmethod*)cb;
 788 
 789       if (Verbose && nm->method() != NULL) {
 790         ResourceMark rm;
 791         char *method_name = nm->method()->name_and_sig_as_C_string();
 792         tty->print("%s", method_name);
 793         if(nm->is_alive()) { tty->print_cr(" alive"); }
 794         if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
 795         if(nm->is_zombie()) { tty->print_cr(" zombie"); }
 796       }
 797 
 798       nmethodCount++;
 799 
 800       if(nm->is_alive()) { nmethodAlive++; }
 801       if(nm->is_not_entrant()) { nmethodNotEntrant++; }
 802       if(nm->is_zombie()) { nmethodZombie++; }
 803       if(nm->is_unloaded()) { nmethodUnloaded++; }
 804       if(nm->is_native_method()) { nmethodNative++; }
 805 
 806       if(nm->method() != NULL && nm->is_java_method()) {
 807         nmethodJava++;
 808         if (nm->insts_size() > maxCodeSize) {
 809           maxCodeSize = nm->insts_size();
 810         }
 811       }
 812     } else if (cb->is_runtime_stub()) {
 813       runtimeStubCount++;
 814     } else if (cb->is_deoptimization_stub()) {
 815       deoptimizationStubCount++;
 816     } else if (cb->is_uncommon_trap_stub()) {
 817       uncommonTrapStubCount++;
 818     } else if (cb->is_adapter_blob()) {
 819       adapterCount++;
 820     } else if (cb->is_buffer_blob()) {
 821       bufferBlobCount++;
 822     }
 823   }
 824 
 825   int bucketSize = 512;
 826   int bucketLimit = maxCodeSize / bucketSize + 1;
 827   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
 828   memset(buckets,0,sizeof(int) * bucketLimit);
 829 
     // Second pass: fill the histogram buckets.
     // NOTE(review): this pass checks only is_java_method(), while the pass
     // that computed maxCodeSize also required method() != NULL — confirm the
     // two conditions always agree, else the index could exceed bucketLimit.
 830   for (cb = first(); cb != NULL; cb = next(cb)) {
 831     if (cb->is_nmethod()) {
 832       nmethod* nm = (nmethod*)cb;
 833       if(nm->is_java_method()) {
 834         buckets[nm->insts_size() / bucketSize]++;
 835       }
 836     }
 837   }

 838   tty->print_cr("Code Cache Entries (total of %d)",total);
 839   tty->print_cr("-------------------------------------------------");
 840   tty->print_cr("nmethods: %d",nmethodCount);
 841   tty->print_cr("\talive: %d",nmethodAlive);
 842   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
 843   tty->print_cr("\tzombie: %d",nmethodZombie);
 844   tty->print_cr("\tunloaded: %d",nmethodUnloaded);
 845   tty->print_cr("\tjava: %d",nmethodJava);
 846   tty->print_cr("\tnative: %d",nmethodNative);
 847   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
 848   tty->print_cr("adapters: %d",adapterCount);
 849   tty->print_cr("buffer blobs: %d",bufferBlobCount);
 850   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
 851   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
 852   tty->print_cr("\nnmethod size distribution (non-zombie java)");
 853   tty->print_cr("-------------------------------------------------");
 854 
 855   for(int i=0; i<bucketLimit; i++) {
 856     if(buckets[i] != 0) {
 857       tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
 858       tty->fill_to(40);
 859       tty->print_cr("%d",buckets[i]);
 860     }
 861   }
 862 
 863   FREE_C_HEAP_ARRAY(int, buckets, mtCode);

 864 }
 865 
 866 #endif // !PRODUCT
 867 
 868 void CodeCache::print() {
 869   print_summary(tty);
 870 
 871 #ifndef PRODUCT
 872   if (!Verbose) return;
 873 
 874   CodeBlob_sizes live;
 875   CodeBlob_sizes dead;
 876 
 877   FOR_ALL_BLOBS(p) {
 878     if (!p->is_alive()) {
 879       dead.add(p);
 880     } else {
 881       live.add(p);
 882     }
 883   }




 181   guarantee(size >= 0, "allocation request must be reasonable");
 182   assert_locked_or_safepoint(CodeCache_lock);
 183   CodeBlob* cb = NULL;
 184   _number_of_blobs++;
 185   while (true) {
 186     cb = (CodeBlob*)_heap->allocate(size, is_critical);
 187     if (cb != NULL) break;
 188     if (!_heap->expand_by(CodeCacheExpansionSize)) {
 189       // Expansion failed
 190       return NULL;
 191     }
 192     if (PrintCodeCacheExtension) {
 193       ResourceMark rm;
 194       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
 195                     (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
 196                     (address)_heap->high() - (address)_heap->low_boundary());
 197     }
 198   }
 199   maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
 200                           (address)_heap->low_boundary()) - unallocated_capacity());

 201   print_trace("allocation", cb, size);
 202   return cb;
 203 }
 204 
 // Free a CodeBlob: update the blob/nmethod/adapter bookkeeping counters and
 // return its storage to the underlying code heap. Caller must hold
 // CodeCache_lock or be at a safepoint.
 205 void CodeCache::free(CodeBlob* cb) {
 206   assert_locked_or_safepoint(CodeCache_lock);

 207 
 208   print_trace("free", cb);
 209   if (cb->is_nmethod()) {
 210     _number_of_nmethods--;
 211     if (((nmethod *)cb)->has_dependencies()) {
 212       _number_of_nmethods_with_dependencies--;
 213     }
 214   }
 215   if (cb->is_adapter_blob()) {
 216     _number_of_adapters--;
 217   }
 218   _number_of_blobs--;
 219 
 220   _heap->deallocate(cb);  // give the storage back to the code heap
 221 

 222   assert(_number_of_blobs >= 0, "sanity check");
 223 }
 224 
 225 
 // Register a newly constructed blob in the cache statistics and flush the
 // hardware instruction cache over its code range so the new code is visible
 // to execution.
 226 void CodeCache::commit(CodeBlob* cb) {
 227   // this is called by nmethod::nmethod, which must already own CodeCache_lock
 228   assert_locked_or_safepoint(CodeCache_lock);
 229   if (cb->is_nmethod()) {
 230     _number_of_nmethods++;
 231     if (((nmethod *)cb)->has_dependencies()) {
 232       _number_of_nmethods_with_dependencies++;
 233     }
 234   }
 235   if (cb->is_adapter_blob()) {
 236     _number_of_adapters++;
 237   }
 238 
 239   // flush the hardware I-cache
 240   ICache::invalidate_range(cb->content_begin(), cb->content_size());
 241 }
 242 
 243 






 244 // Iteration over CodeBlobs
 245 
 246 #define FOR_ALL_BLOBS(var)       for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
 247 #define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
 248 #define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
 249 
 250 
 // Returns true iff 'p' lies within the code heap's address range.
 251 bool CodeCache::contains(void *p) {
 252   // It should be ok to call contains without holding a lock
 253   return _heap->contains(p);
 254 }
 255 
 256 
 257 // This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
 258 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
 259 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
 260 CodeBlob* CodeCache::find_blob(void* start) {
 261   CodeBlob* result = find_blob_unsafe(start);
 262   if (result == NULL) return NULL;
 263   // We could potentially look up non_entrant methods
 264   guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
 265   return result;
 266 }
 267 
 // Look up the nmethod containing address 'start'; asserts that the
 // enclosing blob, if any, really is an nmethod.
 268 nmethod* CodeCache::find_nmethod(void* start) {
 269   CodeBlob *cb = find_blob(start);
 270   assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
 271   return (nmethod*)cb;
 272 }
 273 
 274 
 // Apply 'f' to every CodeBlob in the cache. Caller must hold
 // CodeCache_lock or be at a safepoint.
 275 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
 276   assert_locked_or_safepoint(CodeCache_lock);
 277   FOR_ALL_BLOBS(p) {
 278     f(p);
 279   }
 280 }
 281 
 282 
 283 void CodeCache::nmethods_do(void f(nmethod* nm)) {


 718     p->verify();
 719   }
 720 }
 721 
 // Record that the code cache filled up: bump the fullness counter and, if
 // event tracing is enabled, emit an event describing current occupancy.
 722 void CodeCache::report_codemem_full() {
 723   _codemem_full_count++;
 724   EventCodeCacheFull event;
 725   if (event.should_commit()) {
 726     event.set_startAddress((u8)low_bound());
 727     event.set_commitedTopAddress((u8)high());
 728     event.set_reservedTopAddress((u8)high_bound());
 729     event.set_entryCount(nof_blobs());
 730     event.set_methodCount(nof_nmethods());
 731     event.set_adaptorCount(nof_adapters());
 732     event.set_unallocatedCapacity(unallocated_capacity()/K)
 733     event.set_fullCount(_codemem_full_count);
 734     event.commit();
 735   }
 736 }
 737 
 // Print how much code-heap space is lost to overhead: freelist entries,
 // per-blob rounding up to CodeCacheSegmentSize, and the segment map itself.
 738 void CodeCache::print_memory_overhead() {
 739   int wasted_bytes = 0;
 740   CodeBlob *cb;
 741   for (cb = first(); cb != NULL; cb = next(cb)) {
 742     HeapBlock* heap_block = ((HeapBlock*)cb) - 1;  // HeapBlock header sits immediately before the blob
 743     wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
 744   }
 745   // Print bytes that are allocated in the freelist
 746   ttyLocker ttl;
 747   tty->print_cr("Number of elements in freelist: %d",   freelist_length());
 748   tty->print_cr("Allocated in freelist:          %dkB", bytes_allocated_in_freelist()/K);
 749   tty->print_cr("Unused bytes in CodeBlobs:      %dkB",  wasted_bytes/K);
 750   tty->print_cr("Segment map size:               %dkB",  allocated_segments()/K); // 1 byte per segment
 751 }
 752 
 753 //------------------------------------------------------------------------------------------------
 754 // Non-product version
 755 
 756 #ifndef PRODUCT
 757 






 // Trace a cache event (e.g. "allocation", "free") when PrintCodeCache2 is
 // set. A size of 0 means "use the blob's own size".
 758 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
 759   if (PrintCodeCache2) {  // Need to add a new flag
 760     ResourceMark rm;
 761     if (size == 0)  size = cb->size();
 762     tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
 763   }
 764 }
 765 
 // Print a per-kind census of the code cache (nmethods by state, stubs,
 // adapters, buffer blobs) followed by a 512-byte-bucket size histogram of
 // java nmethods and a memory-overhead summary. Non-product only.
 766 void CodeCache::print_internals() {
 767   int nmethodCount = 0;
 768   int runtimeStubCount = 0;
 769   int adapterCount = 0;
 770   int deoptimizationStubCount = 0;
 771   int uncommonTrapStubCount = 0;
 772   int bufferBlobCount = 0;
 773   int total = 0;
 774   int nmethodAlive = 0;
 775   int nmethodNotEntrant = 0;
 776   int nmethodZombie = 0;
 777   int nmethodUnloaded = 0;
 778   int nmethodJava = 0;
 779   int nmethodNative = 0;
 780   int max_nm_size = 0;
 781   ResourceMark rm;
 782 
 783   CodeBlob *cb;
     // First pass: classify every blob and record the largest java nmethod's
     // total size (used to size the histogram below).
 784   for (cb = first(); cb != NULL; cb = next(cb)) {
 785     total++;
 786     if (cb->is_nmethod()) {
 787       nmethod* nm = (nmethod*)cb;
 788 
 789       if (Verbose && nm->method() != NULL) {
 790         ResourceMark rm;
 791         char *method_name = nm->method()->name_and_sig_as_C_string();
 792         tty->print("%s", method_name);
 793         if(nm->is_alive()) { tty->print_cr(" alive"); }
 794         if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
 795         if(nm->is_zombie()) { tty->print_cr(" zombie"); }
 796       }
 797 
 798       nmethodCount++;
 799 
 800       if(nm->is_alive()) { nmethodAlive++; }
 801       if(nm->is_not_entrant()) { nmethodNotEntrant++; }
 802       if(nm->is_zombie()) { nmethodZombie++; }
 803       if(nm->is_unloaded()) { nmethodUnloaded++; }
 804       if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
 805 
 806       if(nm->method() != NULL && nm->is_java_method()) {
 807         nmethodJava++;
 808         max_nm_size = MAX2(max_nm_size, nm->size());


 809       }
 810     } else if (cb->is_runtime_stub()) {
 811       runtimeStubCount++;
 812     } else if (cb->is_deoptimization_stub()) {
 813       deoptimizationStubCount++;
 814     } else if (cb->is_uncommon_trap_stub()) {
 815       uncommonTrapStubCount++;
 816     } else if (cb->is_adapter_blob()) {
 817       adapterCount++;
 818     } else if (cb->is_buffer_blob()) {
 819       bufferBlobCount++;
 820     }
 821   }
 822 
 823   int bucketSize = 512;
 824   int bucketLimit = max_nm_size / bucketSize + 1;
 825   int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
 826   memset(buckets, 0, sizeof(int) * bucketLimit);
 827 
     // Second pass: fill the histogram buckets.
     // NOTE(review): this pass checks only is_java_method(), while the pass
     // that computed max_nm_size also required method() != NULL — confirm the
     // two conditions always agree, else the index could exceed bucketLimit.
 828   for (cb = first(); cb != NULL; cb = next(cb)) {
 829     if (cb->is_nmethod()) {
 830       nmethod* nm = (nmethod*)cb;
 831       if(nm->is_java_method()) {
 832         buckets[nm->size() / bucketSize]++;
 833        }
 834     }
 835   }
 836 
 837   tty->print_cr("Code Cache Entries (total of %d)",total);
 838   tty->print_cr("-------------------------------------------------");
 839   tty->print_cr("nmethods: %d",nmethodCount);
 840   tty->print_cr("\talive: %d",nmethodAlive);
 841   tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
 842   tty->print_cr("\tzombie: %d",nmethodZombie);
 843   tty->print_cr("\tunloaded: %d",nmethodUnloaded);
 844   tty->print_cr("\tjava: %d",nmethodJava);
 845   tty->print_cr("\tnative: %d",nmethodNative);
 846   tty->print_cr("runtime_stubs: %d",runtimeStubCount);
 847   tty->print_cr("adapters: %d",adapterCount);
 848   tty->print_cr("buffer blobs: %d",bufferBlobCount);
 849   tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
 850   tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
 851   tty->print_cr("\nnmethod size distribution (non-zombie java)");
 852   tty->print_cr("-------------------------------------------------");
 853 
 854   for(int i=0; i<bucketLimit; i++) {
 855     if(buckets[i] != 0) {
 856       tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
 857       tty->fill_to(40);
 858       tty->print_cr("%d",buckets[i]);
 859     }
 860   }
 861 
 862   FREE_C_HEAP_ARRAY(int, buckets, mtCode);
 863   print_memory_overhead();
 864 }
 865 
 866 #endif // !PRODUCT
 867 
 868 void CodeCache::print() {
 869   print_summary(tty);
 870 
 871 #ifndef PRODUCT
 872   if (!Verbose) return;
 873 
 874   CodeBlob_sizes live;
 875   CodeBlob_sizes dead;
 876 
 877   FOR_ALL_BLOBS(p) {
 878     if (!p->is_alive()) {
 879       dead.add(p);
 880     } else {
 881       live.add(p);
 882     }
 883   }


src/share/vm/code/codeCache.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File