/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP

#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/thread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"

// Wrapper for all entry points to the virtual machine.

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and to perform checks when leaving the VM.

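// Illustrative sketch (hypothetical names, not part of this header): a typical
// runtime entry point guarded by these macros looks roughly like
//
//   JRT_ENTRY(void, ExampleRuntime::example_entry(JavaThread* thread))
//     // body runs in _thread_in_vm; may lock, GC and throw exceptions
//   JRT_END
//
// The wrapper classes and *_BASE macros below implement the thread state
// transitions, tracing and verification that such an entry point relies on.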

class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

  static void zombieAll();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  static void serialize_thread_state_with_handler(JavaThread* thread) {
    serialize_thread_state_internal(thread, true);
  }

  // Should only call this if we know that we have a proper SEH set up.
  static void serialize_thread_state(JavaThread* thread) {
    serialize_thread_state_internal(thread, false);
  }

 private:
  static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) {
    // Make sure new state is seen by VM thread
    if (UseMembar) {
      // Force a fence between the write above and read below
      OrderAccess::fence();
    } else {
      // store to serialize page so VM thread can do pseudo remote membar
      if (needs_exception_handler) {
        os::write_memory_serialize_page_with_handler(thread);
      } else {
        os::write_memory_serialize_page(thread);
      }
    }
  }
};


// Basic class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows the safepoint code to detect the change.
  // Time-critical: called on exit from every runtime routine.
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state(thread);

    SafepointMechanism::block_if_requested(thread);
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    SafepointMechanism::block_if_requested(thread);
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM. Blocking here would break the code, since e.g.
  // the preserved arguments have not been set up yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state(_thread_in_native_trans);

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    // We never install asynchronous exceptions when coming (back) into
    // the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointMechanism::poll(thread) || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};
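
// Informal sketch of the transition protocol implemented above: a thread
// returning from native code into the VM moves through
//
//   _thread_in_native -> _thread_in_native_trans -> _thread_in_vm
//
// and a thread leaving the VM for Java moves through
//
//   _thread_in_vm -> _thread_in_vm_trans -> _thread_in_Java
//
// The odd-numbered *_trans states are the windows in which a safepoint or
// handshake may stop the thread (SafepointMechanism::block_if_requested).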

class ThreadInVMForHandshake : public ThreadStateTransition {
  const JavaThreadState _original_state;

  void transition_back() {
    // This can be invoked from transition states and must return to the original state properly
    assert(_thread->thread_state() == _thread_in_vm, "should only call when leaving VM after handshake");
    _thread->set_thread_state(_thread_in_vm_trans);

    InterfaceSupport::serialize_thread_state(_thread);

    SafepointMechanism::block_if_requested(_thread);

    _thread->set_thread_state(_original_state);
  }

 public:

  ThreadInVMForHandshake(JavaThread* thread) : ThreadStateTransition(thread),
      _original_state(thread->thread_state()) {

    if (thread->has_last_Java_frame()) {
      thread->frame_anchor()->make_walkable(thread);
    }

    thread->set_thread_state(_thread_in_vm);
  }

  ~ThreadInVMForHandshake() {
    transition_back();
  }

};

class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};
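
// Illustrative sketch (hypothetical helper): ThreadInVMfromJava can also be
// used directly as a scoped guard, which is exactly what the JRT_ENTRY and
// IRT_ENTRY macros below do:
//
//   void example_vm_upcall(JavaThread* thread) {   // hypothetical function
//     ThreadInVMfromJava __tiv(thread);            // _thread_in_Java -> _thread_in_vm
//     // ... VM work: may lock, GC and throw ...
//   }                                              // back to _thread_in_Java, safepoint-checked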


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};
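
// Illustrative sketch (hypothetical helper): ThreadInVMfromUnknown is intended
// for code that may be reached either from inside the VM or from native code,
// e.g. an exported diagnostic routine:
//
//   void example_exported_helper() {
//     ThreadInVMfromUnknown __tiv;   // transitions only if the current thread is a
//                                    // JavaThread currently in _thread_in_native
//     // ... work that must run as if in the VM ...
//   }                                // transitions back to native only if it transitioned in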


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    assert(!_thread->is_pending_jni_exception_check(), "Pending JNI Exception Check");
    // We don't need to clear_walkable because it will happen automagically when we return to Java.
  }
};
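
// Illustrative sketch (hypothetical callback): ThreadToNativeFromVM brackets
// calls out of the VM into user-supplied native code, e.g. invoking a JNI or
// agent callback from VM-internal code:
//
//   {
//     ThreadToNativeFromVM __tnv(thread);   // _thread_in_vm -> _thread_in_native
//     example_agent_callback(args);         // hypothetical native callback
//   }                                       // back to _thread_in_vm on scope exit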


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable.
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java.
  }
};
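
// Illustrative sketch (hypothetical wait): ThreadBlockInVM is used around
// potentially long blocking operations inside the VM so that this thread does
// not hold up safepoints while it waits:
//
//   {
//     ThreadBlockInVM __tbivm(thread);   // _thread_in_vm -> _thread_blocked
//     example_condition_wait();          // hypothetical blocking call
//   }                                    // _thread_blocked -> _thread_in_vm, safepoint-checked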


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on VM exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.


    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper();
  ~VMEntryWrapper();
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

// Definitions for JRT (Java (Compiler/Shared) Runtime)

// JRT_LEAF currently can be called from either _thread_in_Java or
// _thread_in_native mode. In _thread_in_native, it is ok
// for another thread to trigger GC. The rest of the JRT_LEAF
// rules apply.
class JRTLeafVerifier : public NoSafepointVerifier {
  static bool should_verify_GC();
 public:
#ifdef ASSERT
  JRTLeafVerifier();
  ~JRTLeafVerifier();
#else
  JRTLeafVerifier() {}
  ~JRTLeafVerifier() {}
#endif
};

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */

#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
  TRACE_CALL(result_type, header)                                    \
  debug_only(ResetNoHandleMark __rnhm;)                              \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */
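
// Informal note: the Thread* THREAD local introduced by the *_ENTRY_BASE macros
// is what lets the exception-check macros from utilities/exceptions.hpp
// (CHECK, CHECK_NULL, ...) be used directly in the body of an entry point, e.g.
// in a hypothetical routine:
//
//   JRT_ENTRY(void, ExampleRuntime::resolve_something(JavaThread* thread))
//     Handle h = example_allocate(CHECK);   // hypothetical call; CHECK expands using THREAD
//   JRT_END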


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(NoSafepointVerifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }
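
// Illustrative sketch (hypothetical entry): interpreter runtime entries pair
// IRT_ENTRY/IRT_LEAF with IRT_END, with the JavaThread passed in as a
// parameter named thread:
//
//   IRT_ENTRY(void, InterpreterRuntime::example_entry(JavaThread* thread, int index))
//     // runs in _thread_in_vm; may lock, GC and throw exceptions
//   IRT_END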

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  VM_LEAF_BASE(result_type, header)                                  \
  debug_only(JRTLeafVerifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY, but allows the return value to be computed after the
// safepoint used to get back into Java from the VM (see the illustrative
// sketch after JRT_END below).
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_NO_ASYNC                                           \
    {                                                                \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
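
// Illustrative sketch (hypothetical entry) of the JRT_BLOCK_ENTRY pattern: the
// VM transition is confined to the JRT_BLOCK/JRT_BLOCK_END region, so the
// return value can be computed and returned after the thread is back in Java:
//
//   JRT_BLOCK_ENTRY(address, ExampleRuntime::resolve_stub(JavaThread* thread))
//     address result = NULL;
//     JRT_BLOCK
//       result = example_resolve(thread, CHECK_NULL);   // hypothetical helper, runs in _thread_in_vm
//     JRT_BLOCK_END
//     return result;                                    // executed back in _thread_in_Java
//   JRT_END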

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
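
// Illustrative sketch (hypothetical JNI function): JNI_ENTRY expects a
// parameter named env in the header, from which the JavaThread is derived, and
// JNI_END closes both the function and the extern "C" block:
//
//   JNI_ENTRY(jobject, jni_ExampleFunction(JNIEnv* env, jobject obj))
//     // runs in _thread_in_vm via ThreadInVMfromNative
//     return NULL;
//   JNI_END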



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::current();                      \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }
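
// Illustrative sketch (hypothetical JVM_* function): JVM_ENTRY is used for the
// JVM_* interface functions exported to the class libraries, e.g.
//
//   JVM_ENTRY(jboolean, JVM_ExampleQuery(JNIEnv* env, jclass cls))
//     // runs in _thread_in_vm; thread was derived from env
//     return JNI_FALSE;
//   JVM_END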

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP