/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "gc/shared/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"

// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
// It relies on the fact that there is a HandleMark further
// down the stack (in JavaCalls::call_helper), and just resets
// to the saved values in that HandleMark.

class HandleMarkCleaner: public StackObj {
 private:
  Thread* _thread;
 public:
  HandleMarkCleaner(Thread* thread) {
    _thread = thread;
    _thread->last_handle_mark()->push();
  }
  ~HandleMarkCleaner() {
    _thread->last_handle_mark()->pop_and_restore();
  }

 private:
  inline void* operator new(size_t size, void* ptr) throw() {
    return ptr;
  }
};
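
// Illustrative sketch (not part of the original header): HandleMarkCleaner is
// normally instantiated through the VM_ENTRY_BASE style macros defined below,
// roughly like this:
//
//   void enter_vm(JavaThread* thread) {      // hypothetical entry point
//     HandleMarkCleaner __hm(thread);        // resets to the enclosing HandleMark
//     Thread* THREAD = thread;
//     // ... body that may allocate Handles ...
//   }                                        // handles released back to the mark here
//
// Hand-written code rarely constructs it directly.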

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and to perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  static void serialize_thread_state_with_handler(JavaThread* thread) {
    serialize_thread_state_internal(thread, true);
  }

  // Only call this if we know that a proper SEH (Structured Exception Handler) is set up.
  static void serialize_thread_state(JavaThread* thread) {
    serialize_thread_state_internal(thread, false);
  }

 private:
  static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) {
    // Make sure the new state is seen by the VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Store to the memory serialize page so the VM thread can do a pseudo remote membar
        if (needs_exception_handler) {
          os::write_memory_serialize_page_with_handler(thread);
        } else {
          os::write_memory_serialize_page(thread);
        }
      }
    }
  }
};


// Base class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows the safepoint code to detect the change.
  // Time-critical: called on exit from every runtime routine
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state(thread);

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM. Blocking here would break the code, since e.g.
  // preserved arguments have not been set up yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state(_thread_in_native_trans);

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    // We never install asynchronous exceptions when coming (back) in
    // to the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};
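
// Illustrative sketch (not part of the original header): a VM entry from Java
// conceptually does
//
//   transition_from_java(thread, _thread_in_vm);          // never blocks on the way in
//   // ... VM work ...
//   transition(thread, _thread_in_vm, _thread_in_Java);   // safepoint-aware exit
//
// which is exactly what the ThreadInVMfromJava wrapper below performs in its
// constructor and destructor.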


class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};
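
// Illustrative sketch (not part of the original header): ThreadInVMfromUnknown
// is meant for code that may be reached either from inside the VM or from
// native code, e.g.
//
//   void shared_helper() {            // hypothetical helper
//     ThreadInVMfromUnknown tiv;      // transitions to _thread_in_vm only if needed
//     // ... code that must run in _thread_in_vm when on a JavaThread ...
//   }                                 // transitions back in the destructor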


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    assert(!_thread->is_pending_jni_exception_check(), "Pending JNI Exception Check");
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked the VM expects the stack to be walkable
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};
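
// Illustrative sketch (not part of the original header): VM code that is about
// to block on an OS primitive typically brackets the blocking call like
//
//   {
//     ThreadBlockInVM tbivm(thread);
//     // blocking operation here; the thread state is _thread_blocked and the
//     // stack has been made walkable for the safepoint code
//   }   // back to _thread_in_vm, honoring any pending safepoint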


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on VM exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.


    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper() {
    if (VerifyLastFrame) {
      InterfaceSupport::verify_last_frame();
    }
  }

  ~VMEntryWrapper() {
    InterfaceSupport::check_gc_alot();
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {
      InterfaceSupport::stress_derived_pointers();
    }
#endif
    if (DeoptimizeALot || DeoptimizeRandom) {
      InterfaceSupport::deoptimizeAll();
    }
    if (ZombieALot) {
      InterfaceSupport::zombieAll();
    }
    if (UnlinkSymbolsALot) {
      InterfaceSupport::unlinkSymbols();
    }
    // do verification AFTER potential deoptimization
    if (VerifyStack) {
      InterfaceSupport::verify_stack();
    }

  }
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */

#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
  TRACE_CALL(result_type, header)                                    \
  debug_only(ResetNoHandleMark __rnhm;)                              \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(NoSafepointVerifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }
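
// Illustrative sketch (not part of the original header): an interpreter runtime
// routine is written by pairing IRT_ENTRY with IRT_END, e.g.
//
//   IRT_ENTRY(void, InterpreterRuntime::some_entry(JavaThread* thread, Method* m))
//     // body runs in _thread_in_vm; 'thread' must appear in the header because
//     // the macro references it, and 'THREAD' is also available here
//   IRT_END
//
// InterpreterRuntime::some_entry is a hypothetical signature used only to
// illustrate the expansion.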


// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  VM_LEAF_BASE(result_type, header)                                  \
  debug_only(JRTLeafVerifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY, but allows a return value to be set after the safepoint
// on the way back into Java from the VM
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_NO_ASYNC                                           \
    {                                                                \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
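
// Illustrative sketch (not part of the original header): a runtime routine that
// needs to compute its return value after the safepoint can use the block form,
// roughly like
//
//   JRT_BLOCK_ENTRY(jint, SharedRuntime::some_entry(JavaThread* thread))
//     jint result = 0;
//     JRT_BLOCK
//       // runs in _thread_in_vm; may lock, GC and throw exceptions
//       result = 42;
//     JRT_BLOCK_END
//     return result;   // executed after the transition back toward Java
//   JRT_END
//
// SharedRuntime::some_entry and the value 42 are hypothetical, used only to
// illustrate how the macros nest.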

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
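
// Illustrative sketch (not part of the original header): a JNI implementation
// function is defined by pairing JNI_ENTRY with JNI_END, e.g.
//
//   JNI_ENTRY(jobject, jni_SomeFunction(JNIEnv *env, jobject obj))
//     // body; 'thread' and 'THREAD' are set up by the macro
//     return NULL;
//   JNI_END
//
// The header must contain a JNIEnv* parameter named 'env', since the macro
// derives 'thread' from it via JavaThread::thread_from_jni_environment(env).
// jni_SomeFunction is a hypothetical name used only for illustration.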



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::current();                      \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }
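
// Illustrative sketch (not part of the original header): the JVM_* macros are
// used the same way as the JNI_* ones, e.g.
//
//   JVM_ENTRY(jint, JVM_SomeFunction(JNIEnv *env, jclass cls))
//     // body runs in _thread_in_vm with 'thread' and 'THREAD' available
//     return 0;
//   JVM_END
//
// JVM_SomeFunction is a hypothetical name; as with JNI_ENTRY, the header must
// include a JNIEnv* parameter named 'env' (use JVM_ENTRY_NO_ENV when no
// JNIEnv is available).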

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP