/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "gc/shared/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"

// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
// It relies on the fact that there is a HandleMark further
// down the stack (in JavaCalls::call_helper), and just resets
// to the saved values in that HandleMark.

class HandleMarkCleaner: public StackObj {
 private:
  Thread* _thread;
 public:
  HandleMarkCleaner(Thread* thread) {
    _thread = thread;
    _thread->last_handle_mark()->push();
  }
  ~HandleMarkCleaner() {
    _thread->last_handle_mark()->pop_and_restore();
  }

 private:
  inline void* operator new(size_t size, void* ptr) throw() {
    return ptr;
  }
};
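
// Illustrative sketch (not part of this header): the cleaner depends on the
// HandleMark that JavaCalls::call_helper leaves on the stack. A VM entry
// guarded by VM_ENTRY_BASE (defined below) nests roughly as follows:
//
//   JavaCalls::call_helper(...)          // constructs a HandleMark
//     -> Java code
//       -> runtime entry point           // VM_ENTRY_BASE constructs
//          HandleMarkCleaner __hm(thread);
//          ...                           // handles allocated here are
//                                        // released by ~HandleMarkCleaner,
//                                        // which pops back to the saved
//                                        // state of thread->last_handle_mark()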

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and to perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  // OS dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "interfaceSupport_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "interfaceSupport_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "interfaceSupport_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "interfaceSupport_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "interfaceSupport_bsd.hpp"
#endif

};


// Basic class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows the safepoint code
  // to detect the change.
  // Time-critical: called on exit from every runtime routine
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        os::write_memory_serialize_page(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since
  // we never block on entry to the VM. Blocking here would break the code,
  // since e.g. the preserved arguments have not been set up yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state(_thread_in_native_trans);

    // Make sure new state is seen by GC thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // We never install asynchronous exceptions when coming (back) in
    // to the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere where we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};
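
// Illustrative sketch (not part of this header): in the JavaThreadState
// numbering (see globalDefinitions.hpp) even values are stable states and
// odd values are the short-lived transition states asserted on above, which
// is what lets the safepoint code tell a settled thread from one that is
// mid-transition. A runtime call guarded by ThreadInVMfromJava (below) goes
// roughly:
//
//   _thread_in_Java                    // running Java code
//     -> _thread_in_vm                 // constructor: trans_from_java()
//        ... runtime work ...
//     -> _thread_in_vm_trans           // destructor: trans() sets from + 1,
//                                      // may block here at a safepoint
//     -> _thread_in_Java               // destructor stores the final state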


class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};
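
// Illustrative usage sketch (hypothetical caller, not part of this header):
// code that may be reached either from a JavaThread running in native state
// or from a non-Java thread can use ThreadInVMfromUnknown to transition only
// when a transition is actually needed:
//
//   void maybe_do_vm_work() {
//     ThreadInVMfromUnknown tiv;   // no-op unless the current thread is a
//                                  // JavaThread in _thread_in_native
//     // ... work performed in the _thread_in_vm state when applicable ...
//   }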


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};
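
// Illustrative usage sketch (hypothetical caller, not part of this header):
// a VM-internal operation that is about to block should wrap the blocking
// call in a ThreadBlockInVM scope so safepoints can complete while it waits:
//
//   void wait_for_event(JavaThread* thread) {
//     ThreadBlockInVM tbivm(thread);   // _thread_in_vm -> _thread_blocked
//     // ... block on an OS primitive; the VM thread can safepoint while we
//     //     are in _thread_blocked because the stack was made walkable ...
//   }                                  // _thread_blocked -> _thread_in_vm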


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.

    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper() {
    if (VerifyLastFrame) {
      InterfaceSupport::verify_last_frame();
    }
  }

  ~VMEntryWrapper() {
    InterfaceSupport::check_gc_alot();
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {
      InterfaceSupport::stress_derived_pointers();
    }
#endif
    if (DeoptimizeALot || DeoptimizeRandom) {
      InterfaceSupport::deoptimizeAll();
    }
    if (ZombieALot) {
      InterfaceSupport::zombieAll();
    }
    if (UnlinkSymbolsALot) {
      InterfaceSupport::unlinkSymbols();
    }
    // do verification AFTER potential deoptimization
    if (VerifyStack) {
      InterfaceSupport::verify_stack();
    }

  }
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */

#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
  TRACE_CALL(result_type, header)                                    \
  debug_only(ResetNoHandleMark __rnhm;)                              \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(NoSafepointVerifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }

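
// Illustrative usage sketch (hypothetical entry point, not part of this
// header): an interpreter runtime entry takes the current JavaThread as a
// parameter that must be named 'thread' (the macro body refers to it) and is
// bracketed by IRT_ENTRY/IRT_END:
//
//   IRT_ENTRY(void, InterpreterRuntime::example(JavaThread* thread))
//     // body runs in _thread_in_vm; it may lock, GC and throw exceptions
//   IRT_END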

// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  VM_LEAF_BASE(result_type, header)                                  \
  debug_only(JRTLeafVerifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY but allows for a return value after the safepoint
// to get back into Java from the VM
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_NO_ASYNC                                           \
    {                                                                \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
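
// Illustrative usage sketch (hypothetical entry point, not part of this
// header): JRT_BLOCK_ENTRY only sets up the HandleMarkCleaner, so the
// Java-to-VM transition can be confined to a JRT_BLOCK and the return value
// produced after the block has transitioned back to Java:
//
//   JRT_BLOCK_ENTRY(int, SharedRuntime::example(JavaThread* thread))
//     int result = 0;
//     JRT_BLOCK                        // _thread_in_Java -> _thread_in_vm
//       result = 42;                   // may lock, GC and throw exceptions
//     JRT_BLOCK_END                    // back to _thread_in_Java
//     return result;                   // runs outside the transition
//   JRT_END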

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
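
// Illustrative usage sketch (hypothetical function, not part of this header):
// a JNI implementation function declared with JNI_ENTRY must take a JNIEnv*
// parameter named 'env' so the JavaThread can be recovered from it:
//
//   JNI_ENTRY(jint, jni_ExampleGetVersion(JNIEnv* env))
//     return JNI_VERSION_1_8;   // body runs in _thread_in_vm
//   JNI_END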



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::current();                      \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }
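
// Illustrative usage sketch (hypothetical function, not part of this header):
// a JVM_* implementation declared with JVM_ENTRY likewise takes a JNIEnv*
// parameter named 'env' and transitions from native to VM on entry:
//
//   JVM_ENTRY(jlong, JVM_ExampleCurrentTimeMillis(JNIEnv* env, jclass ignored))
//     return os::javaTimeMillis();
//   JVM_END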

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP