/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "memory/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/top.hpp"

// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
// It relies on the fact that there is a HandleMark further
// down the stack (in JavaCalls::call_helper), and just resets
// to the saved values in that HandleMark.

class HandleMarkCleaner: public StackObj {
 private:
  Thread* _thread;
 public:
  HandleMarkCleaner(Thread* thread) {
    _thread = thread;
    _thread->last_handle_mark()->push();
  }
  ~HandleMarkCleaner() {
    _thread->last_handle_mark()->pop_and_restore();
  }

 private:
  inline void* operator new(size_t size, void* ptr) {
    return ptr;
  }
};
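
// Illustrative usage sketch only; the entry-point name and the oop argument
// below are hypothetical. In practice the cleaner is planted by VM_ENTRY_BASE
// further down in this file, so handles allocated in the body of an entry
// point are released when the wrapper goes out of scope.
//
//   void example_vm_entry(JavaThread* thread, oop obj) {
//     HandleMarkCleaner __hm(thread);   // resets to the enclosing HandleMark on exit
//     Handle h(thread, obj);            // released by ~HandleMarkCleaner
//     // ... body may allocate more handles ...
//   }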

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // tracing
  static void trace(const char* result_type, const char* header);

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static void zap_dead_locals_old();
# endif

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  // OS dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "interfaceSupport_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "interfaceSupport_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "interfaceSupport_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "interfaceSupport_bsd.hpp"
#endif

};


// Base class for all thread state transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows the safepoint code to detect it.
  // Time-critical: called on exit from every runtime routine.
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure the new state is seen by the VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Store to the serialize page so the VM thread can do a pseudo remote membar
        os::write_memory_serialize_page(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure the new state is seen by the VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than the serialization page, in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM: blocking here would break the code, because
  // e.g. the preserve-arguments machinery has not been set up yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state(_thread_in_native_trans);

    // Make sure the new state is seen by the GC thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than the serialization page, in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // We never install asynchronous exceptions when coming (back) into
    // the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};


class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async. exceptions or suspend requests.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async. exceptions or suspend requests.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};
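
// Illustrative usage sketch; the routine below is hypothetical. ThreadBlockInVM
// brackets an operation that may block, so the VM thread can reach a safepoint
// while this thread waits.
//
//   void example_wait(JavaThread* thread) {
//     ThreadBlockInVM tbivm(thread);   // _thread_in_vm -> _thread_blocked
//     // ... potentially blocking operation; safepoints may proceed ...
//   }                                  // destructor: _thread_blocked -> _thread_in_vm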


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on VM exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async. exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.

    // Check for pending suspend requests only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper() {
    if (VerifyLastFrame) {
      InterfaceSupport::verify_last_frame();
    }
  }

  ~VMEntryWrapper() {
    InterfaceSupport::check_gc_alot();
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (ZapDeadLocalsOld) {
      InterfaceSupport::zap_dead_locals_old();
    }
#endif
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {
      InterfaceSupport::stress_derived_pointers();
    }
#endif
    if (DeoptimizeALot || DeoptimizeRandom) {
      InterfaceSupport::deoptimizeAll();
    }
    if (ZombieALot) {
      InterfaceSupport::zombieAll();
    }
    if (UnlinkSymbolsALot) {
      InterfaceSupport::unlinkSymbols();
    }
    // do verification AFTER potential deoptimization
    if (VerifyStack) {
      InterfaceSupport::verify_stack();
    }

  }
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (TraceRuntimeCalls)                                           \
    InterfaceSupport::trace(#result_type, #header);                \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(No_Safepoint_Verifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Another special case for nmethod_entry_point: keep the nmethod that the
// interpreter is about to branch to from being flushed before we branch to
// its interpreter_entry_point.  Skip stress testing here too.
// Also, we don't allow async exceptions because handling them here is just too painful.
#define IRT_ENTRY_FOR_NMETHOD(result_type, header)                   \
  result_type header {                                               \
    nmethodLocker _nmlock(nm);                                       \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)

#define IRT_END }

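// Illustrative usage sketch; the routine below is hypothetical (real users of
// these macros live in interpreterRuntime.cpp). The macro pair expands into the
// method definition plus the entry/exit guards defined above.
//
//   IRT_ENTRY(void, InterpreterRuntime::example(JavaThread* thread, oopDesc* obj))
//     Handle h(thread, obj);   // safe: HandleMarkCleaner and THREAD are set up
//     // ... may lock, GC, or throw exceptions ...
//   IRT_END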

// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  VM_LEAF_BASE(result_type, header)                                  \
  debug_only(JRT_Leaf_Verifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY, but allows a return value to be computed after the
// safepoint (i.e. after JRT_BLOCK_END) on the way back into Java from the VM.
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
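
// Illustrative usage sketch; the routine and helper below are hypothetical.
// JRT_BLOCK_ENTRY defers the thread state transition so that a return value
// can still be produced after JRT_BLOCK_END, back in _thread_in_Java.
//
//   JRT_BLOCK_ENTRY(address, SharedRuntime::example(JavaThread* thread))
//     address result = NULL;
//     JRT_BLOCK
//       // _thread_in_vm here: may lock, GC, or throw exceptions
//       result = compute_target(thread);   // hypothetical helper
//     JRT_BLOCK_END
//     return result;                       // computed after leaving the VM state
//   JRT_END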

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
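
// Illustrative usage sketch; the JNI function below is hypothetical (real users
// of these macros live in jni.cpp). The macro supplies the extern "C" linkage,
// the thread lookup and the native->VM transition; JNI_END closes both braces.
//
//   JNI_ENTRY(jobject, jni_ExampleFunction(JNIEnv *env, jobject obj))
//     oop result = JNIHandles::resolve(obj);     // safe: we are _thread_in_vm
//     return JNIHandles::make_local(env, result);
//   JNI_END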



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_END } }
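
// Illustrative usage sketch; the JVM_* function below is hypothetical (real
// users of these macros live in jvm.cpp). JVM_ENTRY behaves like JNI_ENTRY, and
// JVM_END closes the routine and the extern "C" block.
//
//   JVM_ENTRY(jboolean, JVM_ExampleCheck(JNIEnv *env, jclass cls))
//     // body runs in _thread_in_vm with a HandleMarkCleaner installed
//     return JNI_FALSE;
//   JVM_END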

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP