/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "gc/shared/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"

// Wrappers for all entry points into the virtual machine.

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros guard entry points into the VM and
// perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  static void serialize_thread_state_with_handler(JavaThread* thread) {
    serialize_thread_state_internal(thread, true);
  }

  // Should only call this if we know that we have a proper SEH set up.
  static void serialize_thread_state(JavaThread* thread) {
    serialize_thread_state_internal(thread, false);
  }

 private:
  static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) {
    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        if (needs_exception_handler) {
          os::write_memory_serialize_page_with_handler(thread);
        } else {
          os::write_memory_serialize_page(thread);
        }
      }
    }
  }
};


// Basic class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that allows the safepoint code to
  // detect the change.
  // Time-critical: called on exit from every runtime routine
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state(thread);

    SafepointMechanism::block_if_requested(thread);
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }
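  // For example, transition(thread, _thread_in_vm, _thread_in_Java) briefly
  // publishes the odd-numbered _thread_in_vm_trans state, blocks if the
  // safepoint mechanism requests it, and only then stores _thread_in_Java.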

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state
    thread->set_thread_state((JavaThreadState)(from + 1));

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    SafepointMechanism::block_if_requested(thread);
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since
  // we never block on entry to the VM: blocking here would break the code,
  // since e.g. preserved arguments have not been set up yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state
    thread->set_thread_state(_thread_in_native_trans);

    InterfaceSupport::serialize_thread_state_with_handler(thread);

    // We never install asynchronous exceptions when coming (back) into
    // the runtime from native code because the runtime is not set up
    // to handle exceptions floating around at arbitrary points.
    if (SafepointMechanism::poll(thread) || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere where we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};

class ThreadInVMForHandshake : public ThreadStateTransition {
  const JavaThreadState _original_state;

  void transition_back() {
    // This can be invoked from transition states and must return to the original state properly
    assert(_thread->thread_state() == _thread_in_vm, "should only call when leaving VM after handshake");
    _thread->set_thread_state(_thread_in_vm_trans);

    InterfaceSupport::serialize_thread_state(_thread);

    SafepointMechanism::block_if_requested(_thread);

    _thread->set_thread_state(_original_state);
  }

 public:

  ThreadInVMForHandshake(JavaThread* thread) : ThreadStateTransition(thread),
      _original_state(thread->thread_state()) {

    if (thread->has_last_Java_frame()) {
      thread->frame_anchor()->make_walkable(thread);
    }

    thread->set_thread_state(_thread_in_vm);
  }

  ~ThreadInVMForHandshake() {
    transition_back();
  }

};

class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};
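
// Illustrative sketch of how ThreadInVMfromUnknown is typically used: code
// that can be reached both from a JavaThread running native code and from a
// non-Java thread simply scopes an instance around the VM work.
//
//   {
//     ThreadInVMfromUnknown __tiv;  // transitions to _thread_in_vm only if the
//                                   // current thread is a JavaThread in
//                                   // _thread_in_native; otherwise a no-op
//     ... VM work ...
//   }                               // destructor transitions back to native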


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    assert(!_thread->is_pending_jni_exception_check(), "Pending JNI Exception Check");
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};
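
// Illustrative sketch (not taken from a real call site): a thread already in
// _thread_in_vm wraps a potentially long or blocking operation so that
// safepoints can proceed while it waits.
//
//   {
//     ThreadBlockInVM tbivm(thread);
//     // thread is _thread_blocked here; perform the blocking wait
//   }
//   // back in _thread_in_vm once the destructor has run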


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    if (_thread->stack_yellow_reserved_zone_disabled()) {
      _thread->enable_stack_yellow_reserved_zone();
    }
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.


    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper();
  ~VMEntryWrapper();
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */

#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
  TRACE_CALL(result_type, header)                                    \
  debug_only(ResetNoHandleMark __rnhm;)                              \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(NoSafepointVerifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }
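
// Illustrative example of the IRT macros (the routine name and arguments are
// hypothetical, not part of this file). The header must declare a
// 'JavaThread* thread' parameter, since the wrapper references 'thread':
//
//   IRT_ENTRY(void, InterpreterRuntime::example_helper(JavaThread* thread, int bci))
//     // body runs in _thread_in_vm and may lock, GC and throw exceptions
//   IRT_END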


// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  VM_LEAF_BASE(result_type, header)                                  \
  debug_only(JRTLeafVerifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Like JRT_ENTRY, but allows a return value to be computed after the
// safepoint (i.e. after JRT_BLOCK_END) on the way back into Java from the VM.
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_NO_ASYNC                                           \
    {                                                                \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
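
// Illustrative example of the JRT_BLOCK form (the routine name and arguments
// are hypothetical). Code outside the block still runs in _thread_in_Java;
// only the JRT_BLOCK..JRT_BLOCK_END region transitions into the VM:
//
//   JRT_BLOCK_ENTRY(int, SharedRuntime::example_blocking_helper(JavaThread* thread, int value))
//     // still in _thread_in_Java; no locking, GC or exceptions here
//     JRT_BLOCK
//       // in _thread_in_vm; may lock, GC and throw exceptions
//     JRT_BLOCK_END
//     return 0;   // compute the return value after the transition back
//   JRT_END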

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
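
// Illustrative example of the JNI macros (the function name is hypothetical).
// The header must declare a 'JNIEnv *env' parameter, since the wrapper derives
// the current JavaThread from it, and the body must be closed with JNI_END:
//
//   JNI_ENTRY(jobject, jni_ExampleFunction(JNIEnv *env, jobject obj))
//     // runs in _thread_in_vm with a HandleMarkCleaner and THREAD set up
//     return NULL;
//   JNI_END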



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::current();                      \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
  { {                                                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }
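
// Illustrative example of the JVM macros (the function name is hypothetical).
// As with JNI_ENTRY, the header must declare a 'JNIEnv* env' parameter (except
// for JVM_ENTRY_NO_ENV), and the body is closed with JVM_END:
//
//   JVM_ENTRY(jlong, JVM_ExampleEntry(JNIEnv* env, jclass cls))
//     // runs in _thread_in_vm; may lock, GC and throw exceptions
//     return 0;
//   JVM_END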

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP