src/hotspot/share/runtime/interfaceSupport.cpp

rev 56101 : 8227745: Enable Escape Analysis for better performance when debugging
Reviewed-by: ???


  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectedHeap.inline.hpp"
  28 #include "logging/log.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "memory/universe.hpp"
  31 #include "runtime/atomic.hpp"
  32 #include "runtime/frame.inline.hpp"
  33 #include "runtime/handles.inline.hpp"
  34 #include "runtime/init.hpp"
  35 #include "runtime/interfaceSupport.inline.hpp"
  36 #include "runtime/orderAccess.hpp"
  37 #include "runtime/os.inline.hpp"
  38 #include "runtime/thread.inline.hpp"
  39 #include "runtime/safepointVerifiers.hpp"
  40 #include "runtime/vframe.hpp"
  41 #include "runtime/vmThread.hpp"
  42 #include "utilities/preserveException.hpp"
  43 
  44 // Implementation of InterfaceSupport
  45 
  46 #ifdef ASSERT
  47 VMEntryWrapper::VMEntryWrapper() {
  48   if (VerifyLastFrame) {
  49     InterfaceSupport::verify_last_frame();
  50   }
  51 }
  52 
  53 VMEntryWrapper::~VMEntryWrapper() {
  54   InterfaceSupport::check_gc_alot();
  55   if (WalkStackALot) {
  56     InterfaceSupport::walk_stack();
  57   }
  58 #ifdef COMPILER2
  59   // This option is not used by Compiler 1
  60   if (StressDerivedPointers) {
  61     InterfaceSupport::stress_derived_pointers();
  62   }
  63 #endif
  64   if (DeoptimizeALot || DeoptimizeRandom) {
  65     InterfaceSupport::deoptimizeAll();
  66   }
  67   if (ZombieALot) {
  68     InterfaceSupport::zombieAll();
  69   }
  70   // do verification AFTER potential deoptimization
  71   if (VerifyStack) {
  72     InterfaceSupport::verify_stack();
  73   }
  74 }
  75 
  76 VMNativeEntryWrapper::VMNativeEntryWrapper() {
  77   if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  78 }
  79 
  80 VMNativeEntryWrapper::~VMNativeEntryWrapper() {
  81   if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  82 }
  83 
  84 long InterfaceSupport::_number_of_calls       = 0;
  85 long InterfaceSupport::_scavenge_alot_counter = 1;
  86 long InterfaceSupport::_fullgc_alot_counter   = 1;
  87 long InterfaceSupport::_fullgc_alot_invocation = 0;
  88 
  89 Histogram* RuntimeHistogram;


 182   // walk
 183   int i = 0;
 184   for (vframe* f = start_vf; f; f = f->sender() ) {
 185     if (i < 50) vframe_array[i++] = f;
 186   }
 187 }
 188 
 189 
 190 void InterfaceSupport::walk_stack() {
 191   JavaThread* thread = JavaThread::current();
 192   walk_stack_counter++;
 193   if (!thread->has_last_Java_frame()) return;
 194   ResourceMark rm(thread);
 195   RegisterMap reg_map(thread);
 196   walk_stack_from(thread->last_java_vframe(&reg_map));
 197 }
 198 
 199 // invocation counter for InterfaceSupport::deoptimizeAll/zombieAll functions
 200 int deoptimizeAllCounter = 0;
 201 int zombieAllCounter = 0;
 202 
 203 void InterfaceSupport::zombieAll() {
 204   // This method is called by all threads when a thread makes a
 205   // transition to the VM state (for example, runtime calls).
 206   // Divide the number of calls by the number of threads so that the
 207   // frequency of ZombieAll events does not depend on the number of threads.
 208   int value = zombieAllCounter / Threads::number_of_threads();
 209   if (is_init_completed() && value > ZombieALotInterval) {
 210     zombieAllCounter = 0;
 211     VM_ZombieAll op;
 212     VMThread::execute(&op);
 213   }
 214   zombieAllCounter++;
 215 }
 216 
 217 void InterfaceSupport::deoptimizeAll() {
 218   // This method is called by all threads when a thread makes a
 219   // transition to the VM state (for example, runtime calls).
 220   // Divide the number of calls by the number of threads so that the
 221   // frequency of DeoptimizeAll events does not depend on the number of threads.
 222   int value = deoptimizeAllCounter / Threads::number_of_threads();
 223   if (is_init_completed()) {
 224     if (DeoptimizeALot && value > DeoptimizeALotInterval) {
 225       deoptimizeAllCounter = 0;
 226       VM_DeoptimizeAll op;
 227       VMThread::execute(&op);
 228     } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
 229       VM_DeoptimizeAll op;
 230       VMThread::execute(&op);
 231     }
 232   }
 233   deoptimizeAllCounter++;
 234 }
 235 
 236 
 237 void InterfaceSupport::stress_derived_pointers() {
 238 #ifdef COMPILER2
 239   JavaThread *thread = JavaThread::current();
 240   if (!is_init_completed()) return;
 241   ResourceMark rm(thread);
 242   bool found = false;
 243   for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
 244     CodeBlob* cb = sfs.current()->cb();
 245     if (cb != NULL && cb->oop_maps() ) {
 246       // Find oopmap for current method
 247       const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
 248       assert(map != NULL, "no oopmap found for pc");
 249       found = map->has_derived_pointer();
 250     }
 251   }
 252   if (found) {
 253     // $$$ Not sure what to do here.




  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/collectedHeap.hpp"
  27 #include "gc/shared/collectedHeap.inline.hpp"
  28 #include "logging/log.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "memory/universe.hpp"
  31 #include "runtime/atomic.hpp"
  32 #include "runtime/frame.inline.hpp"
  33 #include "runtime/deoptimization.hpp"
  34 #include "runtime/handles.inline.hpp"
  35 #include "runtime/init.hpp"
  36 #include "runtime/interfaceSupport.inline.hpp"
  37 #include "runtime/orderAccess.hpp"
  38 #include "runtime/os.inline.hpp"
  39 #include "runtime/thread.inline.hpp"
  40 #include "runtime/safepointVerifiers.hpp"
  41 #include "runtime/vframe.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/preserveException.hpp"
  44 
  45 // Implementation of InterfaceSupport
  46 
  47 #ifdef ASSERT
  48 VMEntryWrapper::VMEntryWrapper() {
  49   if (VerifyLastFrame) {
  50     InterfaceSupport::verify_last_frame();
  51   }
  52 }
  53 
  54 VMEntryWrapper::~VMEntryWrapper() {
  55   InterfaceSupport::check_gc_alot();
  56   if (WalkStackALot) {
  57     InterfaceSupport::walk_stack();
  58   }
  59 #ifdef COMPILER2
  60   // This option is not used by Compiler 1
  61   if (StressDerivedPointers) {
  62     InterfaceSupport::stress_derived_pointers();
  63   }
  64 #endif
  65   if (DeoptimizeALot || DeoptimizeRandom) {
  66     InterfaceSupport::deoptimizeAll();
  67   }
  68   if (ZombieALot) {
  69     InterfaceSupport::zombieAll();
  70   }
  71   if (DeoptimizeObjectsALot == 1) {
  72     InterfaceSupport::deoptimizeAllObjects();
  73   }
  74   // do verification AFTER potential deoptimization
  75   if (VerifyStack) {
  76     InterfaceSupport::verify_stack();
  77   }
  78 }
  79 
  80 VMNativeEntryWrapper::VMNativeEntryWrapper() {
  81   if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  82 }
  83 
  84 VMNativeEntryWrapper::~VMNativeEntryWrapper() {
  85   if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  86 }
  87 
  88 long InterfaceSupport::_number_of_calls       = 0;
  89 long InterfaceSupport::_scavenge_alot_counter = 1;
  90 long InterfaceSupport::_fullgc_alot_counter   = 1;
  91 long InterfaceSupport::_fullgc_alot_invocation = 0;
  92 
  93 Histogram* RuntimeHistogram;


 186   // walk
 187   int i = 0;
 188   for (vframe* f = start_vf; f; f = f->sender() ) {
 189     if (i < 50) vframe_array[i++] = f;
 190   }
 191 }
 192 
 193 
 194 void InterfaceSupport::walk_stack() {
 195   JavaThread* thread = JavaThread::current();
 196   walk_stack_counter++;
 197   if (!thread->has_last_Java_frame()) return;
 198   ResourceMark rm(thread);
 199   RegisterMap reg_map(thread);
 200   walk_stack_from(thread->last_java_vframe(&reg_map));
 201 }
 202 
 203 // invocation counter for InterfaceSupport::deoptimizeAll/zombieAll functions
 204 int deoptimizeAllCounter = 0;
 205 int zombieAllCounter = 0;
 206 int deoptimizeAllObjectsCounter = 0;
 207 
 208 void InterfaceSupport::zombieAll() {
 209   // This method is called by all threads when a thread makes a
 210   // transition to the VM state (for example, runtime calls).
 211   // Divide the number of calls by the number of threads so that the
 212   // frequency of ZombieAll events does not depend on the number of threads.
 213   int value = zombieAllCounter / Threads::number_of_threads();
 214   if (is_init_completed() && value > ZombieALotInterval) {
 215     zombieAllCounter = 0;
 216     VM_ZombieAll op;
 217     VMThread::execute(&op);
 218   }
 219   zombieAllCounter++;
 220 }
 221 
 222 void InterfaceSupport::deoptimizeAll() {
 223   // This method is called by all threads when a thread makes a
 224   // transition to the VM state (for example, runtime calls).
 225   // Divide the number of calls by the number of threads so that the
 226   // frequency of DeoptimizeAll events does not depend on the number of threads.
 227   int value = deoptimizeAllCounter / Threads::number_of_threads();
 228   if (is_init_completed()) {
 229     if (DeoptimizeALot && value > DeoptimizeALotInterval) {
 230       deoptimizeAllCounter = 0;
 231       VM_DeoptimizeAll op;
 232       VMThread::execute(&op);
 233     } else if (DeoptimizeRandom && (value & 0x1F) == (os::random() & 0x1F)) {
 234       VM_DeoptimizeAll op;
 235       VMThread::execute(&op);
 236     }
 237   }
 238   deoptimizeAllCounter++;
 239 }
 240 
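The stress functions in this file (zombieAll(), deoptimizeAll(), and deoptimizeAllObjects() below) share one throttling pattern: a global counter is bumped on every transition into the VM, and dividing it by the live thread count approximates the number of calls per thread, so the stress interval stays roughly constant no matter how many threads are running. The DeoptimizeRandom arm fires instead when the low five bits of that quotient match the low five bits of os::random(), i.e. with probability about 1/32 per check. A minimal standalone sketch of the interval logic (illustrative only, not VM code; should_fire is a made-up name):

    #include <cstdio>

    static int counter = 0;   // stand-in for deoptimizeAllCounter

    // Fire once the per-thread call estimate exceeds the interval,
    // mirroring deoptimizeAll()'s check, reset, then increment order.
    static bool should_fire(int num_threads, int interval) {
      bool fire = (counter / num_threads) > interval;
      if (fire) counter = 0;
      counter++;
      return fire;
    }

    int main() {
      const int interval = 4;             // stand-in for DeoptimizeALotInterval
      const int thread_counts[] = {1, 4};
      for (int threads : thread_counts) {
        counter = 0;
        int calls = 0;
        while (!should_fire(threads, interval)) calls++;
        // Total calls grow with the thread count, but calls per thread
        // stay near the configured interval.
        printf("%d thread(s): event after %d total calls\n", threads, calls);
      }
      return 0;
    }
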
 241 void InterfaceSupport::deoptimizeAllObjects() {
 242   // This method is called by all threads when a thread makes a
 243   // transition to the VM state (for example, runtime calls).
 244   // Divide the number of calls by the number of threads so that the
 245   // frequency of DeoptimizeObjectsALot events does not depend on the number of threads.
 246   int value = deoptimizeAllObjectsCounter / Threads::number_of_threads();
 247   if (is_init_completed() && value > DeoptimizeObjectsALotInterval) {
 248     // Revert optimizations based on escape analysis for all compiled frames of all Java threads,
 249     // as if objects local to a frame or a thread had escaped.
 250     deoptimizeAllObjectsCounter = 0;
 251     JavaThread* ct = JavaThread::current();
 252     JVMTIEscapeBarrier eb(ct, true);
 253     eb.deoptimize_objects_all_threads();
 254   }
 255   deoptimizeAllObjectsCounter++;
 256 }
 257 
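A hedged usage note: assuming DeoptimizeObjectsALot and DeoptimizeObjectsALotInterval are develop flags introduced elsewhere in this change (and thus only settable in debug builds), the new path could be exercised along these lines, with MyApp standing in for any application class:

    java -XX:DeoptimizeObjectsALot=1 -XX:DeoptimizeObjectsALotInterval=100 MyApp

Value 1 is the mode VMEntryWrapper checks above, so VM entries periodically trigger deoptimizeAllObjects() once the interval is reached.
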
 258 
 259 void InterfaceSupport::stress_derived_pointers() {
 260 #ifdef COMPILER2
 261   JavaThread *thread = JavaThread::current();
 262   if (!is_init_completed()) return;
 263   ResourceMark rm(thread);
 264   bool found = false;
 265   for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
 266     CodeBlob* cb = sfs.current()->cb();
 267     if (cb != NULL && cb->oop_maps() ) {
 268       // Find oopmap for current method
 269       const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
 270       assert(map != NULL, "no oopmap found for pc");
 271       found = map->has_derived_pointer();
 272     }
 273   }
 274   if (found) {
 275     // $$$ Not sure what to do here.