/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "gc/shared/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/top.hpp"

// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
// It relies on the fact that there is a HandleMark further
// down the stack (in JavaCalls::call_helper), and just resets
// to the saved values in that HandleMark.

class HandleMarkCleaner: public StackObj {
 private:
  Thread* _thread;
 public:
  HandleMarkCleaner(Thread* thread) {
    _thread = thread;
    _thread->last_handle_mark()->push();
  }
  ~HandleMarkCleaner() {
    _thread->last_handle_mark()->pop_and_restore();
  }

 private:
  inline void* operator new(size_t size, void* ptr) throw() {
    return ptr;
  }
};
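
// Illustrative sketch (not part of this header) of how HandleMarkCleaner
// cooperates with the HandleMark set up further down the stack; the caller
// shape below is simplified and hypothetical:
//
//   JavaCalls::call_helper(...) {
//     HandleMark hm(thread);              // the "real" HandleMark
//     ...
//     {                                   // some VM entry point, e.g. via VM_ENTRY_BASE
//       HandleMarkCleaner hmc(thread);    // push(): remember the enclosing mark's current state
//       ... allocate handles ...
//     }                                   // pop_and_restore(): release only the handles
//                                         // allocated since the push
//   }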

// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
// VM_ENTRY_BASE macros. These macros are used to guard entry points into
// the VM and perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  // OS dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "interfaceSupport_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "interfaceSupport_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "interfaceSupport_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "interfaceSupport_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "interfaceSupport_bsd.hpp"
#endif

};


// Basic class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that lets the safepoint code detect the change.
  // Time-critical: called on exit from every runtime routine
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        os::write_memory_serialize_page(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }
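
  // Illustrative walk-through (comment only) of transition(thread, _thread_in_vm, _thread_in_Java):
  //
  //   1. thread_state = _thread_in_vm + 1;   // the odd-numbered transition state
  //   2. fence, or write to the serialize page, so the VM thread observes the new state
  //   3. block in SafepointSynchronize::block() if a safepoint is in progress
  //   4. thread_state = _thread_in_Java;     // the final, stable (even-numbered) state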

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM: blocking here would break the code, because
  // e.g. the arguments have not been preserved yet.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state(_thread_in_native_trans);

    // Make sure new state is seen by GC thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // We never install asynchronous exceptions when coming (back) in
    // to the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere where we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};


class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspend requests.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspend requests.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java
  }
};
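
// Illustrative use (the surrounding caller is hypothetical): drop to _thread_blocked
// around an operation that may wait, so a safepoint can proceed while we are blocked:
//
//   {
//     ThreadBlockInVM tbivm(thread);    // _thread_in_vm -> _thread_blocked
//     // ... park/wait on some OS primitive ...
//   }                                   // _thread_blocked -> _thread_in_vm
//                                       // (may block here if a safepoint is in progress)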


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.

    // Check for pending suspend requests only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper() {
    if (VerifyLastFrame) {
      InterfaceSupport::verify_last_frame();
    }
  }

  ~VMEntryWrapper() {
    InterfaceSupport::check_gc_alot();
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {
      InterfaceSupport::stress_derived_pointers();
    }
#endif
    if (DeoptimizeALot || DeoptimizeRandom) {
      InterfaceSupport::deoptimizeAll();
    }
    if (ZombieALot) {
      InterfaceSupport::zombieAll();
    }
    if (UnlinkSymbolsALot) {
      InterfaceSupport::unlinkSymbols();
    }
    // do verification AFTER potential deoptimization
    if (VerifyStack) {
      InterfaceSupport::verify_stack();
    }

  }
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define VM_LEAF_BASE(result_type, header)                            \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define VM_ENTRY_BASE(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  os::verify_stack_alignment();                                      \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    VM_LEAF_BASE(result_type, header)                                \
    debug_only(No_Safepoint_Verifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

#define IRT_END }
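
// Illustrative use (the entry-point name below is hypothetical; the interpreter
// passes 'thread' explicitly, as noted above):
//
//   IRT_ENTRY(void, InterpreterRuntime::example_entry(JavaThread* thread, oopDesc* obj))
//     // body runs in _thread_in_vm with THREAD set and a HandleMarkCleaner active;
//     // it may lock, GC and throw exceptions
//   IRT_END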


// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  VM_LEAF_BASE(result_type, header)                                  \
  debug_only(JRT_Leaf_Verifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    VM_ENTRY_BASE(result_type, header, thread)                       \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY, but allows the return value to be set up after the safepoint
// transition back into Java from the VM (the transition itself is done by
// JRT_BLOCK/JRT_BLOCK_END inside the body).
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_NO_ASYNC                                           \
    {                                                                \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
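
// Illustrative use (entry-point and helper names are hypothetical): with
// JRT_BLOCK_ENTRY the transition is done by JRT_BLOCK/JRT_BLOCK_END inside the
// body, so the return value can be produced after the transition scope closes:
//
//   JRT_BLOCK_ENTRY(address, SharedRuntime::example_blocked(JavaThread* thread))
//     address result = NULL;
//     JRT_BLOCK
//       result = example_compute(thread);   // runs in _thread_in_vm: may lock, GC, throw
//     JRT_BLOCK_END
//     return result;
//   JRT_END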

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)             \
extern "C" {                                                         \
  result_type JNICALL header {                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    VM_LEAF_BASE(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
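
// Illustrative use (hypothetical JNI implementation function; the header must
// take a JNIEnv* named 'env', which the macro uses to recover the JavaThread):
//
//   JNI_ENTRY(jobject, jni_ExampleGet(JNIEnv *env, jobject obj))
//     // runs in _thread_in_vm with THREAD set and a WeakPreserveExceptionMark active
//     return NULL;
//   JNI_END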



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = JavaThread::current();                      \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_ENTRY_BASE(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    VM_QUICK_ENTRY_BASE(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    VM_LEAF_BASE(result_type, header)


#define JVM_END } }
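
// Illustrative use (hypothetical JVM_* implementation; 'env' is required since
// the macro derives the JavaThread from it):
//
//   JVM_ENTRY(jint, JVM_ExampleCount(JNIEnv* env, jclass ignored))
//     // runs in _thread_in_vm with THREAD set
//     return 0;
//   JVM_END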

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP