< prev index next >

src/share/vm/code/codeCache.cpp

Print this page




 729   FOR_ALL_HEAPS(heap) {
 730     FOR_ALL_BLOBS(cb, *heap) {
 731       if (cb->is_nmethod()) {
 732         nmethod* nm = (nmethod*)cb;
 733         count += nm->verify_icholder_relocations();
 734       }
 735     }
 736   }
 737 
 738   assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
 739          CompiledICHolder::live_count(), "must agree");
 740 #endif
 741 }
 742 
// GC prologue hook, called before a collection. Currently the code cache
// needs no preparation, so this is intentionally a no-op.
void CodeCache::gc_prologue() {
}
 745 
 746 void CodeCache::gc_epilogue() {
 747   assert_locked_or_safepoint(CodeCache_lock);
 748   NMethodIterator iter;
 749   while(iter.next_alive()) {
 750     nmethod* nm = iter.method();
 751     assert(!nm->is_unloaded(), "Tautology");
 752     if (needs_cache_clean()) {


 753       nm->cleanup_inline_caches();
 754     }
 755     DEBUG_ONLY(nm->verify());
 756     DEBUG_ONLY(nm->verify_oop_relocations());
 757   }

 758   set_needs_cache_clean(false);
 759   prune_scavenge_root_nmethods();
 760 
 761   verify_icholder_relocations();
 762 }
 763 
 764 void CodeCache::verify_oops() {
 765   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 766   VerifyOopClosure voc;
 767   NMethodIterator iter;
 768   while(iter.next_alive()) {
 769     nmethod* nm = iter.method();
 770     nm->oops_do(&voc);
 771     nm->verify_oop_relocations();
 772   }
 773 }
 774 
 775 size_t CodeCache::capacity() {
 776   size_t cap = 0;
 777   FOR_ALL_HEAPS(heap) {


 976   }
 977 }
 978 
 979 int CodeCache::mark_for_deoptimization(Method* dependee) {
 980   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 981   int number_of_marked_CodeBlobs = 0;
 982 
 983   NMethodIterator iter;
 984   while(iter.next_alive()) {
 985     nmethod* nm = iter.method();
 986     if (nm->is_dependent_on_method(dependee)) {
 987       ResourceMark rm;
 988       nm->mark_for_deoptimization();
 989       number_of_marked_CodeBlobs++;
 990     }
 991   }
 992 
 993   return number_of_marked_CodeBlobs;
 994 }
 995 
 996 void CodeCache::make_marked_nmethods_zombies() {
 997   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 998   NMethodIterator iter;
 999   while(iter.next_alive()) {
1000     nmethod* nm = iter.method();
1001     if (nm->is_marked_for_deoptimization()) {
1002 
1003       // If the nmethod has already been made non-entrant and it can be converted
1004       // then zombie it now. Otherwise make it non-entrant and it will eventually
1005       // be zombied when it is no longer seen on the stack. Note that the nmethod
1006       // might be "entrant" and not on the stack and so could be zombied immediately
1007       // but we can't tell because we don't track it on stack until it becomes
1008       // non-entrant.
1009 
1010       if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
1011         nm->make_zombie();
1012       } else {
1013         nm->make_not_entrant();
1014       }
1015     }
1016   }
1017 }
1018 
1019 void CodeCache::make_marked_nmethods_not_entrant() {
1020   assert_locked_or_safepoint(CodeCache_lock);
1021   NMethodIterator iter;
1022   while(iter.next_alive()) {
1023     nmethod* nm = iter.method();
1024     if (nm->is_marked_for_deoptimization()) {
1025       nm->make_not_entrant();
1026     }
1027   }
1028 }
1029 
1030 // Flushes compiled methods dependent on dependee.
1031 void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
1032   assert_lock_strong(Compile_lock);
1033 
1034   if (number_of_nmethods_with_dependencies() == 0) return;
1035 
1036   // CodeCache can only be updated by a thread_in_VM and they will all be
1037   // stopped during the safepoint so CodeCache will be safe to update without
1038   // holding the CodeCache_lock.


1055   if (number_of_nmethods_with_dependencies() == 0) return;
1056 
1057   // CodeCache can only be updated by a thread_in_VM and they will all be
1058   // stopped during the safepoint so CodeCache will be safe to update without
1059   // holding the CodeCache_lock.
1060 
1061   // Compute the dependent nmethods
1062   if (mark_for_evol_deoptimization(ev_k_h) > 0) {
1063     // At least one nmethod has been marked for deoptimization
1064 
1065     // All this already happens inside a VM_Operation, so we'll do all the work here.
1066     // Stuff copied from VM_Deoptimize and modified slightly.
1067 
1068     // We do not want any GCs to happen while we are in the middle of this VM operation
1069     ResourceMark rm;
1070     DeoptimizationMarker dm;
1071 
1072     // Deoptimize all activations depending on marked nmethods
1073     Deoptimization::deoptimize_dependents();
1074 
1075     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1076     make_marked_nmethods_not_entrant();
1077   }
1078 }
1079 #endif // HOTSWAP
1080 
1081 
// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    make_marked_nmethods_not_entrant();
  }
}
1109 
// Verifies the internal consistency of the whole code cache: each code
// heap's own bookkeeping first, then every blob in it that is still alive.
void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}
1121 
1122 // A CodeHeap is full. Print out warning and report event.
1123 void CodeCache::report_codemem_full(int code_blob_type, bool print) {
1124   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1125   CodeHeap* heap = get_code_heap(code_blob_type);




 729   FOR_ALL_HEAPS(heap) {
 730     FOR_ALL_BLOBS(cb, *heap) {
 731       if (cb->is_nmethod()) {
 732         nmethod* nm = (nmethod*)cb;
 733         count += nm->verify_icholder_relocations();
 734       }
 735     }
 736   }
 737 
 738   assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
 739          CompiledICHolder::live_count(), "must agree");
 740 #endif
 741 }
 742 
// GC prologue hook, called before a collection. Currently the code cache
// needs no preparation, so this is intentionally a no-op.
void CodeCache::gc_prologue() {
}
 745 
 746 void CodeCache::gc_epilogue() {
 747   assert_locked_or_safepoint(CodeCache_lock);
 748   NMethodIterator iter;
 749   while(iter.next()) {
 750     nmethod* nm = iter.method();
 751     if (!nm->is_zombie()) {
 752       if (needs_cache_clean()) {
 753         // Clean ICs of unloaded nmethods as well because they may reference other
 754         // unloaded nmethods that may be flushed earlier in the sweeper cycle.
 755         nm->cleanup_inline_caches();
 756       }
 757       DEBUG_ONLY(nm->verify());
 758       DEBUG_ONLY(nm->verify_oop_relocations());
 759     }
 760   }
 761   set_needs_cache_clean(false);
 762   prune_scavenge_root_nmethods();
 763 
 764   verify_icholder_relocations();
 765 }
 766 
 767 void CodeCache::verify_oops() {
 768   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 769   VerifyOopClosure voc;
 770   NMethodIterator iter;
 771   while(iter.next_alive()) {
 772     nmethod* nm = iter.method();
 773     nm->oops_do(&voc);
 774     nm->verify_oop_relocations();
 775   }
 776 }
 777 
 778 size_t CodeCache::capacity() {
 779   size_t cap = 0;
 780   FOR_ALL_HEAPS(heap) {


 979   }
 980 }
 981 
 982 int CodeCache::mark_for_deoptimization(Method* dependee) {
 983   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 984   int number_of_marked_CodeBlobs = 0;
 985 
 986   NMethodIterator iter;
 987   while(iter.next_alive()) {
 988     nmethod* nm = iter.method();
 989     if (nm->is_dependent_on_method(dependee)) {
 990       ResourceMark rm;
 991       nm->mark_for_deoptimization();
 992       number_of_marked_CodeBlobs++;
 993     }
 994   }
 995 
 996   return number_of_marked_CodeBlobs;
 997 }
 998 























 999 void CodeCache::make_marked_nmethods_not_entrant() {
1000   assert_locked_or_safepoint(CodeCache_lock);
1001   NMethodIterator iter;
1002   while(iter.next_alive()) {
1003     nmethod* nm = iter.method();
1004     if (nm->is_marked_for_deoptimization()) {
1005       nm->make_not_entrant();
1006     }
1007   }
1008 }
1009 
1010 // Flushes compiled methods dependent on dependee.
1011 void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
1012   assert_lock_strong(Compile_lock);
1013 
1014   if (number_of_nmethods_with_dependencies() == 0) return;
1015 
1016   // CodeCache can only be updated by a thread_in_VM and they will all be
1017   // stopped during the safepoint so CodeCache will be safe to update without
1018   // holding the CodeCache_lock.


1035   if (number_of_nmethods_with_dependencies() == 0) return;
1036 
1037   // CodeCache can only be updated by a thread_in_VM and they will all be
1038   // stopped during the safepoint so CodeCache will be safe to update without
1039   // holding the CodeCache_lock.
1040 
1041   // Compute the dependent nmethods
1042   if (mark_for_evol_deoptimization(ev_k_h) > 0) {
1043     // At least one nmethod has been marked for deoptimization
1044 
1045     // All this already happens inside a VM_Operation, so we'll do all the work here.
1046     // Stuff copied from VM_Deoptimize and modified slightly.
1047 
1048     // We do not want any GCs to happen while we are in the middle of this VM operation
1049     ResourceMark rm;
1050     DeoptimizationMarker dm;
1051 
1052     // Deoptimize all activations depending on marked nmethods
1053     Deoptimization::deoptimize_dependents();
1054 
1055     // Make the dependent methods not entrant
1056     make_marked_nmethods_not_entrant();
1057   }
1058 }
1059 #endif // HOTSWAP
1060 
1061 
// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant
    make_marked_nmethods_not_entrant();
  }
}
1089 
// Verifies the internal consistency of the whole code cache: each code
// heap's own bookkeeping first, then every blob in it that is still alive.
void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}
1101 
1102 // A CodeHeap is full. Print out warning and report event.
1103 void CodeCache::report_codemem_full(int code_blob_type, bool print) {
1104   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1105   CodeHeap* heap = get_code_heap(code_blob_type);


< prev index next >