/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP

#include "gc/shared/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/thread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"

// Wrapper for all entry points to the virtual machine.

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and to perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  static void serialize_thread_state_with_handler(JavaThread* thread) {
    serialize_thread_state_internal(thread, true);
  }

  // Should only call this if we know that we have a proper SEH set up.
  static void serialize_thread_state(JavaThread* thread) {
    serialize_thread_state_internal(thread, false);
  }

 private:
  static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) {
    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        if (needs_exception_handler) {
          os::write_memory_serialize_page_with_handler(thread);
        } else {
          os::write_memory_serialize_page(thread);
        }
      }
    }
  }
};


// Basic class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows a safepoint to detect the change.
  // Time-critical: called on exit from every runtime routine.
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state(thread);

    SafepointMechanism::block_if_requested(thread);
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    SafepointMechanism::block_if_requested(thread);
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM: blocking here would break the code, since
  // e.g. the preserve arguments have not been set up yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state(_thread_in_native_trans);

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    // We never install asynchronous exceptions when coming (back) into
    // the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointMechanism::poll(thread) || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};

class ThreadInVMForHandshake : public ThreadStateTransition {
  const JavaThreadState _original_state;

  void transition_back() {
    // This can be invoked from transition states and must return to the original state properly
    assert(_thread->thread_state() == _thread_in_vm, "should only call when leaving VM after handshake");
    _thread->set_thread_state(_thread_in_vm_trans);

    InterfaceSupport::serialize_thread_state(_thread);

    SafepointMechanism::block_if_requested(_thread);

    _thread->set_thread_state(_original_state);
  }

 public:

  ThreadInVMForHandshake(JavaThread* thread) : ThreadStateTransition(thread),
      _original_state(thread->thread_state()) {

    if (thread->has_last_Java_frame()) {
      thread->frame_anchor()->make_walkable(thread);
    }

    thread->set_thread_state(_thread_in_vm);
  }

  ~ThreadInVMForHandshake() {
    transition_back();
  }

};

class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    assert(!_thread->is_pending_jni_exception_check(), "Pending JNI Exception Check");
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};
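
// Illustrative sketch (not part of the original header): VM code that is about
// to wait on something external typically brackets the blocking call with a
// ThreadBlockInVM, so safepoints are not held up while it waits. The helper
// and the use of ParkEvent below are hypothetical and only show the pattern.
//
//   void wait_for_external_event(JavaThread* thread, ParkEvent* event) {
//     // Precondition: thread is in state _thread_in_vm.
//     ThreadBlockInVM tbivm(thread);  // _thread_in_vm -> _thread_blocked
//     event->park();                  // safepoints can proceed while we wait
//   }                                 // destructor: _thread_blocked -> _thread_in_vm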


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.


    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper();
  ~VMEntryWrapper();
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */

#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
  TRACE_CALL(result_type, header)                                    \
  debug_only(ResetNoHandleMark __rnhm;)                              \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(NoSafepointVerifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }
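
// Illustrative usage sketch (not from the original header): an interpreter
// runtime entry point is declared with IRT_ENTRY/IRT_END, which wrap the body
// in a ThreadInVMfromJava transition plus VM_ENTRY_BASE. The class and method
// names below are hypothetical; real examples live in interpreterRuntime.cpp.
//
//   IRT_ENTRY(void, ExampleRuntime::do_something(JavaThread* thread, int index))
//     // Body runs in _thread_in_vm: it may lock, GC and throw exceptions,
//     // and THREAD / the handle mark from VM_ENTRY_BASE are in scope here.
//     ResourceMark rm(thread);
//   IRT_END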


// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  VM_LEAF_BASE(result_type, header)                                  \
  debug_only(JRTLeafVerifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY, but allows for a return value to be set up after the
// safepoint transition, on the way back into Java from the VM
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_NO_ASYNC                                           \
    {                                                                \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
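
// Illustrative usage sketch (not from the original header): JRT_BLOCK_ENTRY
// only sets up the HandleMarkCleaner; the safepoint transition happens in an
// explicit JRT_BLOCK ... JRT_BLOCK_END region, so a return value can be
// produced after the block, on the way back to Java. The names below are
// hypothetical; real examples live in sharedRuntime.cpp and the compiler
// runtimes.
//
//   JRT_BLOCK_ENTRY(address, ExampleRuntime::resolve_helper(JavaThread* thread))
//     address result = NULL;
//     JRT_BLOCK
//       // In _thread_in_vm here: may lock, GC and throw exceptions.
//       result = compute_target(CHECK_NULL);   // hypothetical helper
//     JRT_BLOCK_END
//     return result;   // executed after the transition back toward Java
//   JRT_END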

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
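
// Illustrative usage sketch (not from the original header): a JNI function
// implementation is declared with JNI_ENTRY/JNI_END, which supply the
// extern "C" linkage, derive the JavaThread from the JNIEnv, and perform the
// native->VM transition. The function name below is hypothetical; the real
// users are in jni.cpp.
//
//   JNI_ENTRY(jint, jni_ExampleGetVersion(JNIEnv *env))
//     // In _thread_in_vm here; 'thread' and 'THREAD' are in scope.
//     return JNI_VERSION_1_8;
//   JNI_END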



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::current();                      \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }
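
// Illustrative usage sketch (not from the original header): JVM_ENTRY is used
// for the JVM_* interface functions exported to the class libraries. The
// function name below is hypothetical; real users are in jvm.cpp.
//
//   JVM_ENTRY(jobject, JVM_ExampleCurrentThread(JNIEnv* env, jclass threadClass))
//     // In _thread_in_vm here; raw oops must be wrapped before returning.
//     oop result = thread->threadObj();
//     return JNIHandles::make_local(env, result);
//   JVM_END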

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP