src/share/vm/code/codeCache.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File 8067836 Sdiff src/share/vm/code

src/share/vm/code/codeCache.cpp

Print this page
rev 7616 : 8067836: The Universe::flush_foo methods belong in CodeCache.
Summary: Move this code to CodeCache.
Reviewed-by: kbarrett, kvn
   1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 985       // but we can't tell because we don't track it on stack until it becomes
 986       // non-entrant.
 987 
 988       if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
 989         nm->make_zombie();
 990       } else {
 991         nm->make_not_entrant();
 992       }
 993     }
 994   }
 995 }
 996 
 997 void CodeCache::make_marked_nmethods_not_entrant() {
       // Walk all alive nmethods and make every one that has been marked
       // for deoptimization not-entrant.  Caller must hold CodeCache_lock
       // or be at a safepoint (asserted below).
 998   assert_locked_or_safepoint(CodeCache_lock);
 999   NMethodIterator iter;
1000   while(iter.next_alive()) {
1001     nmethod* nm = iter.method();
1002     if (nm->is_marked_for_deoptimization()) {
1003       nm->make_not_entrant();
1004     }











































































































1005   }
1006 }
1007 
1008 void CodeCache::verify() {
       // Sanity-check the code cache: verify each code heap's internal
       // structure, then verify every blob in that heap that is still alive.
1009   assert_locked_or_safepoint(CodeCache_lock);
1010   FOR_ALL_HEAPS(heap) {
1011     (*heap)->verify();
1012     FOR_ALL_BLOBS(cb, *heap) {
1013       if (cb->is_alive()) {
1014         cb->verify();
1015       }
1016     }
1017   }
1018 }
1019 
1020 // A CodeHeap is full. Print out warning and report event.
1021 void CodeCache::report_codemem_full(int code_blob_type, bool print) {
1022   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1023   CodeHeap* heap = get_code_heap(code_blob_type);
1024   assert(heap != NULL, "heap is null");


   1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 985       // but we can't tell because we don't track it on stack until it becomes
 986       // non-entrant.
 987 
 988       if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
 989         nm->make_zombie();
 990       } else {
 991         nm->make_not_entrant();
 992       }
 993     }
 994   }
 995 }
 996 
 997 void CodeCache::make_marked_nmethods_not_entrant() {
       // Walk all alive nmethods and make every one that has been marked
       // for deoptimization not-entrant.  Caller must hold CodeCache_lock
       // or be at a safepoint (asserted below).
 998   assert_locked_or_safepoint(CodeCache_lock);
 999   NMethodIterator iter;
1000   while(iter.next_alive()) {
1001     nmethod* nm = iter.method();
1002     if (nm->is_marked_for_deoptimization()) {
1003       nm->make_not_entrant();
1004     }
1005   }
1006 }
1007 
1008 // Flushes compiled methods dependent on dependee.
     // Marks dependent nmethods for deoptimization and, if any were marked,
     // performs the deoptimization via a VM_Deoptimize operation executed
     // by the VMThread.
1009 void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
1010   assert_lock_strong(Compile_lock);
1011 
       // Fast path: nothing in the cache records any dependencies.
1012   if (number_of_nmethods_with_dependencies() == 0) return;
1013 
1014   // CodeCache can only be updated by a thread_in_VM and they will all be
1015   // stopped during the safepoint so CodeCache will be safe to update without
1016   // holding the CodeCache_lock.
1017 
1018   KlassDepChange changes(dependee);
1019 
1020   // Compute the dependent nmethods
1021   if (mark_for_deoptimization(changes) > 0) {
1022     // At least one nmethod has been marked for deoptimization
1023     VM_Deoptimize op;
1024     VMThread::execute(&op);
1025   }
1026 }
1027 
1028 // Flushes compiled methods dependent on a particular CallSite
1029 // instance when its target is different than the given MethodHandle.
1030 void CodeCache::flush_dependents_on(Handle call_site, Handle method_handle) {
1031   assert_lock_strong(Compile_lock);
1032 
       // Fast path: nothing in the cache records any dependencies.
1033   if (number_of_nmethods_with_dependencies() == 0) return;
1034 
1035   // CodeCache can only be updated by a thread_in_VM and they will all be
1036   // stopped during the safepoint so CodeCache will be safe to update without
1037   // holding the CodeCache_lock.
1038 
1039   CallSiteDepChange changes(call_site(), method_handle());
1040 
1041   // Compute the dependent nmethods that have a reference to a
1042   // CallSite object.  We use InstanceKlass::mark_dependent_nmethod
1043   // directly instead of CodeCache::mark_for_deoptimization because we
1044   // want dependents on the call site class only not all classes in
1045   // the ContextStream.
1046   int marked = 0;
1047   {
         // Hold CodeCache_lock only for the marking step; the VM operation
         // below runs after the lock is released.
1048     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1049     InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
1050     marked = call_site_klass->mark_dependent_nmethods(changes);
1051   }
1052   if (marked > 0) {
1053     // At least one nmethod has been marked for deoptimization
1054     VM_Deoptimize op;
1055     VMThread::execute(&op);
1056   }
1057 }
1058 
1059 #ifdef HOTSWAP
     // NOTE(review): compiled only in HOTSWAP builds — presumably the class
     // redefinition (JVMTI RedefineClasses) configuration; confirm against
     // the build flags.
1060 // Flushes compiled methods dependent on dependee in the evolutionary sense
1061 void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
1062   // --- Compile_lock is not held. However we are at a safepoint.
1063   assert_locked_or_safepoint(Compile_lock);
       // Fast path: nothing in the cache records any dependencies.
1064   if (number_of_nmethods_with_dependencies() == 0) return;
1065 
1066   // CodeCache can only be updated by a thread_in_VM and they will all be
1067   // stopped during the safepoint so CodeCache will be safe to update without
1068   // holding the CodeCache_lock.
1069 
1070   // Compute the dependent nmethods
1071   if (mark_for_evol_deoptimization(ev_k_h) > 0) {
1072     // At least one nmethod has been marked for deoptimization
1073 
1074     // All this already happens inside a VM_Operation, so we'll do all the work here.
1075     // Stuff copied from VM_Deoptimize and modified slightly.
1076 
1077     // We do not want any GCs to happen while we are in the middle of this VM operation
1078     ResourceMark rm;
1079     DeoptimizationMarker dm;
1080 
1081     // Deoptimize all activations depending on marked nmethods
1082     Deoptimization::deoptimize_dependents();
1083 
1084     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1085     make_marked_nmethods_not_entrant();
1086   }
1087 }
1088 #endif // HOTSWAP
1089 
1090 
1091 // Flushes compiled methods dependent on dependee
     // Same pattern as flush_evol_dependents_on: mark, then deoptimize
     // in place since we are already at a safepoint.
1092 void CodeCache::flush_dependents_on_method(methodHandle m_h) {
1093   // --- Compile_lock is not held. However we are at a safepoint.
1094   assert_locked_or_safepoint(Compile_lock);
1095 
1096   // CodeCache can only be updated by a thread_in_VM and they will all be
1097   // stopped during the safepoint so CodeCache will be safe to update without
1098   // holding the CodeCache_lock.
1099 
1100   // Compute the dependent nmethods
1101   if (mark_for_deoptimization(m_h()) > 0) {
1102     // At least one nmethod has been marked for deoptimization
1103 
1104     // All this already happens inside a VM_Operation, so we'll do all the work here.
1105     // Stuff copied from VM_Deoptimize and modified slightly.
1106 
1107     // We do not want any GCs to happen while we are in the middle of this VM operation
1108     ResourceMark rm;
1109     DeoptimizationMarker dm;
1110 
1111     // Deoptimize all activations depending on marked nmethods
1112     Deoptimization::deoptimize_dependents();
1113 
1114     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1115     make_marked_nmethods_not_entrant();
1116   }
1117 }
1118 
1119 void CodeCache::verify() {
       // Sanity-check the code cache: verify each code heap's internal
       // structure, then verify every blob in that heap that is still alive.
1120   assert_locked_or_safepoint(CodeCache_lock);
1121   FOR_ALL_HEAPS(heap) {
1122     (*heap)->verify();
1123     FOR_ALL_BLOBS(cb, *heap) {
1124       if (cb->is_alive()) {
1125         cb->verify();
1126       }
1127     }
1128   }
1129 }
1130 
1131 // A CodeHeap is full. Print out warning and report event.
1132 void CodeCache::report_codemem_full(int code_blob_type, bool print) {
1133   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
1134   CodeHeap* heap = get_code_heap(code_blob_type);
1135   assert(heap != NULL, "heap is null");


src/share/vm/code/codeCache.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File