src/share/vm/runtime/thread.hpp

 884   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
 885   bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
 886                                                          // never locked) when throwing an exception. Used by interpreter only.
 887 
 888   // JNI attach states:
 889   enum JNIAttachStates {
 890     _not_attaching_via_jni = 1,  // thread is not attaching via JNI
 891     _attaching_via_jni,          // thread is attaching via JNI
 892     _attached_via_jni            // thread has attached via JNI
 893   };
 894 
 895   // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
 896   // A native thread that is attaching via JNI starts with a value
 897   // of _attaching_via_jni and transitions to _attached_via_jni.
 898   volatile JNIAttachStates _jni_attach_state;
 899 
 900  public:
 901   // State of the stack guard pages for this thread.
 902   enum StackGuardState {
 903     stack_guard_unused,         // not needed

 904     stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
 905     stack_guard_enabled         // enabled
 906   };
 907 
 908  private:
 909 
 910 #if INCLUDE_JVMCI
 911   // The _pending_* fields below are used to communicate extra information
 912   // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.
 913 
 914   // Communicates the DeoptReason and DeoptAction of the uncommon trap
 915   int       _pending_deoptimization;
 916 
 917   // Specifies whether the uncommon trap is to bci 0 of a synchronized method
 918   // before the monitor has been acquired.
 919   bool      _pending_monitorenter;
 920 
 921   // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
 922   bool      _pending_transfer_to_interpreter;
 923 


 932     address   _implicit_exception_pc;
 933 
 934     // Communicates an alternative call target to an i2c stub from a JavaCall.
 935     address   _alternate_call_target;
 936   } _jvmci;
 937 
 938   // Support for high precision, thread sensitive counters in JVMCI compiled code.
 939   jlong*    _jvmci_counters;
 940 
 941  public:
 942   static jlong* _jvmci_old_thread_counters;
 943   static void collect_counters(typeArrayOop array);
 944  private:
 945 #endif // INCLUDE_JVMCI
 946 
 947   StackGuardState  _stack_guard_state;
 948 
 949   // Precompute the limit of the stack as used in stack overflow checks.
 950   // We load it from here to simplify the stack overflow check in assembly.
 951   address          _stack_overflow_limit;

 952 
 953   // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
 954   // used to temporarily pass values into and out of the runtime system during exception handling for compiled
 955   // code)
 956   volatile oop     _exception_oop;               // Exception thrown in compiled code
 957   volatile address _exception_pc;                // PC where exception happened
 958   volatile address _exception_handler_pc;        // PC for handler of exception
 959   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 960 
 961  private:
 962   // support for JNI critical regions
 963   jint    _jni_active_critical;                  // count of entries into JNI critical region
 964 
 965   // Checked JNI: function name requires exception check
 966   char* _pending_jni_exception_check_fn;
 967 
 968   // For deadlock detection.
 969   int _depth_first_number;
 970 
 971   // JVMTI PopFrame support


1318 #endif // INCLUDE_JVMCI
1319 
1320   // Exception handling for compiled methods
1321   oop      exception_oop() const                 { return _exception_oop; }
1322   address  exception_pc() const                  { return _exception_pc; }
1323   address  exception_handler_pc() const          { return _exception_handler_pc; }
1324   bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
1325 
1326   void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
1327   void set_exception_pc(address a)               { _exception_pc = a; }
1328   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
1329   void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
1330 
1331   void clear_exception_oop_and_pc() {
1332     set_exception_oop(NULL);
1333     set_exception_pc(NULL);
1334   }
1335 
1336   // Stack overflow support
1337   inline size_t stack_available(address cur_sp);




1338   address stack_yellow_zone_base() {
1339     return (address)(stack_base() -
1340                      (stack_size() -
1341                      (stack_red_zone_size() + stack_yellow_zone_size())));
1342   }
1343   size_t  stack_yellow_zone_size() {
1344     return StackYellowPages * os::vm_page_size();
1345   }
1346   address stack_red_zone_base() {
1347     return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
1348   }
1349   size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }



1350   bool in_stack_yellow_zone(address a) {
1351     return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
1352   }
1353   bool in_stack_red_zone(address a) {
1354     return (a <= stack_red_zone_base()) &&
1355            (a >= (address)((intptr_t)stack_base() - stack_size()));
1356   }
1357 
1358   void create_stack_guard_pages();
1359   void remove_stack_guard_pages();
1360 


1361   void enable_stack_yellow_zone();
1362   void disable_stack_yellow_zone();
1363   void enable_stack_red_zone();
1364   void disable_stack_red_zone();
1365 
1366   inline bool stack_guard_zone_unused();
1367   inline bool stack_yellow_zone_disabled();
1368   inline bool stack_yellow_zone_enabled();









1369 
1370   // Attempt to reguard the stack after a stack overflow may have occurred.
1371   // Returns true if (a) guard pages are not needed on this thread, (b) the
1372   // pages are already guarded, or (c) the pages were successfully reguarded.
1373   // Returns false if there is not enough stack space to reguard the pages, in
1374   // which case the caller should unwind a frame and try again.  The argument
1375   // should be the caller's (approximate) sp.
1376   bool reguard_stack(address cur_sp);
1377   // Similar to above, but checks whether the current stack pointer is out of the guard area
1378   // and reguard if possible.
1379   bool reguard_stack(void);
1380 
1381   address stack_overflow_limit() { return _stack_overflow_limit; }
1382   void set_stack_overflow_limit() {
1383     _stack_overflow_limit = _stack_base - _stack_size +
1384                             ((StackShadowPages +

1385                               StackYellowPages +
1386                               StackRedPages) * os::vm_page_size());
1387   }
1388 
1389   // Misc. accessors/mutators
1390   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
1391   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
1392   bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
1393 
1394 #ifndef PRODUCT
1395   void record_jump(address target, address instr, const char* file, int line);
1396 #endif // PRODUCT
1397 
1398   // For assembly stub generation
1399   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
1400 #ifndef PRODUCT
1401   static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
1402   static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
1403 #endif // PRODUCT
1404   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }


1414   static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target); }
1415   static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result); }
1416   static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2); }
1417   static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state); }
1418   static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc); }
1419   static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread); }
1420 #if INCLUDE_JVMCI
1421   static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
1422   static ByteSize pending_monitorenter_offset()  { return byte_offset_of(JavaThread, _pending_monitorenter); }
1423   static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); }
1424   static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
1425   static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
1426   static ByteSize jvmci_counters_offset()        { return byte_offset_of(JavaThread, _jvmci_counters); }
1427 #endif // INCLUDE_JVMCI
1428   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop); }
1429   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc); }
1430   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
1431   static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
1432   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
1433   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }

1434   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
1435 
1436   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
1437   static ByteSize should_post_on_exceptions_flag_offset() {
1438     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
1439   }
1440 
1441 #if INCLUDE_ALL_GCS
1442   static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
1443   static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }
1444 #endif // INCLUDE_ALL_GCS
1445 
1446   // Returns the jni environment for this thread
1447   JNIEnv* jni_environment()                      { return &_jni_environment; }
1448 
1449   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
1450     JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
1451     // Only return NULL if thread is off the thread list; starting to
1452     // exit should not return NULL.
1453     if (thread_from_jni_env->is_terminated()) {




 884   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
 885   bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
 886                                                          // never locked) when throwing an exception. Used by interpreter only.
 887 
 888   // JNI attach states:
 889   enum JNIAttachStates {
 890     _not_attaching_via_jni = 1,  // thread is not attaching via JNI
 891     _attaching_via_jni,          // thread is attaching via JNI
 892     _attached_via_jni            // thread has attached via JNI
 893   };
 894 
 895   // A regular JavaThread's _jni_attach_state is _not_attaching_via_jni.
 896   // A native thread that is attaching via JNI starts with a value
 897   // of _attaching_via_jni and transitions to _attached_via_jni.
 898   volatile JNIAttachStates _jni_attach_state;
 899 
 900  public:
 901   // State of the stack guard pages for this thread.
 902   enum StackGuardState {
 903     stack_guard_unused,         // not needed
 904     stack_guard_reserved_disabled,
 905     stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
 906     stack_guard_enabled         // enabled
 907   };
 908 
 909  private:
 910 
 911 #if INCLUDE_JVMCI
 912   // The _pending_* fields below are used to communicate extra information
 913   // from an uncommon trap in JVMCI compiled code to the uncommon trap handler.
 914 
 915   // Communicates the DeoptReason and DeoptAction of the uncommon trap
 916   int       _pending_deoptimization;
 917 
 918   // Specifies whether the uncommon trap is to bci 0 of a synchronized method
 919   // before the monitor has been acquired.
 920   bool      _pending_monitorenter;
 921 
 922   // Specifies if the DeoptReason for the last uncommon trap was Reason_transfer_to_interpreter
 923   bool      _pending_transfer_to_interpreter;
 924 


 933     address   _implicit_exception_pc;
 934 
 935     // Communicates an alternative call target to an i2c stub from a JavaCall.
 936     address   _alternate_call_target;
 937   } _jvmci;
 938 
 939   // Support for high precision, thread sensitive counters in JVMCI compiled code.
 940   jlong*    _jvmci_counters;
 941 
 942  public:
 943   static jlong* _jvmci_old_thread_counters;
 944   static void collect_counters(typeArrayOop array);
 945  private:
 946 #endif // INCLUDE_JVMCI
 947 
 948   StackGuardState  _stack_guard_state;
 949 
 950   // Precompute the limit of the stack as used in stack overflow checks.
 951   // We load it from here to simplify the stack overflow check in assembly.
 952   address          _stack_overflow_limit;
 953   intptr_t*        _reserved_stack_activation;
 954 
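
As the comment above notes, the limit is cached in the thread so that the compiled-code overflow check is a single comparison (or a "bang" store) against it, rather than recomputing the zone boundaries on every method entry. A minimal standalone sketch of that check; the struct, names, and the frame_size parameter are stand-ins, not HotSpot's:

    #include <cstddef>
    #include <cstdint>

    typedef uint8_t* address;

    struct ThreadSketch {
      address _stack_overflow_limit;   // lowest sp still considered safe (illustrative)
    };

    // True if pushing a frame of frame_size bytes from cur_sp would run into
    // the guard zones; generated code performs the equivalent single compare.
    inline bool frame_would_overflow(const ThreadSketch* t,
                                     address cur_sp, size_t frame_size) {
      return cur_sp - frame_size < t->_stack_overflow_limit;
    }
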
 955   // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
 956   // used to temporarily pass values into and out of the runtime system during exception handling for compiled
 957   // code)
 958   volatile oop     _exception_oop;               // Exception thrown in compiled code
 959   volatile address _exception_pc;                // PC where exception happened
 960   volatile address _exception_handler_pc;        // PC for handler of exception
 961   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 962 
 963  private:
 964   // support for JNI critical regions
 965   jint    _jni_active_critical;                  // count of entries into JNI critical region
 966 
 967   // Checked JNI: function name requires exception check
 968   char* _pending_jni_exception_check_fn;
 969 
 970   // For deadlock detection.
 971   int _depth_first_number;
 972 
 973   // JVMTI PopFrame support


1320 #endif // INCLUDE_JVMCI
1321 
1322   // Exception handling for compiled methods
1323   oop      exception_oop() const                 { return _exception_oop; }
1324   address  exception_pc() const                  { return _exception_pc; }
1325   address  exception_handler_pc() const          { return _exception_handler_pc; }
1326   bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
1327 
1328   void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
1329   void set_exception_pc(address a)               { _exception_pc = a; }
1330   void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
1331   void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
1332 
1333   void clear_exception_oop_and_pc() {
1334     set_exception_oop(NULL);
1335     set_exception_pc(NULL);
1336   }
1337 
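
Taken together, these setters and getters carry an in-flight exception between compiled code and the runtime: the runtime records the thrown oop and the pc at which it was thrown, resolves a handler pc, and the continuation in compiled code reloads and clears the pair. A standalone sketch of that handoff under stand-in types (oop here is just void*, and lookup_handler is a hypothetical callback, not a HotSpot API):

    #include <cstddef>
    #include <cstdint>

    typedef uint8_t* address;
    typedef void*    oop;       // stand-in for a Java object reference

    struct CompiledExceptionState {
      oop     exception_oop;
      address exception_pc;
      address exception_handler_pc;
    };

    // Runtime side: record the exception and where it was thrown, then pick a handler.
    address route_exception(CompiledExceptionState* s, oop ex, address throwing_pc,
                            address (*lookup_handler)(address)) {
      s->exception_oop        = ex;
      s->exception_pc         = throwing_pc;
      s->exception_handler_pc = lookup_handler(throwing_pc);  // hypothetical lookup
      return s->exception_handler_pc;                         // continuation jumps here
    }

    // Handler side: reload the oop/pc pair and clear the per-thread slots.
    oop consume_exception(CompiledExceptionState* s, address* pc_out) {
      oop ex  = s->exception_oop;
      *pc_out = s->exception_pc;
      s->exception_oop = NULL;
      s->exception_pc  = NULL;
      return ex;
    }
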
1338   // Stack overflow support
1339   inline size_t stack_available(address cur_sp);
1340   address stack_reserved_zone_base()
1341     { return stack_yellow_zone_base();}
1342   size_t stack_reserved_zone_size()
1343     { return StackReservedPages * os::vm_page_size(); }
1344   address stack_yellow_zone_base() {
1345     return (address)(stack_base() -
1346                      (stack_size() -
1347                      (stack_red_zone_size() + stack_yellow_zone_size())));
1348   }
1349   size_t  stack_yellow_zone_size() {
1350     return StackYellowPages * os::vm_page_size() + stack_reserved_zone_size();
1351   }
1352   address stack_red_zone_base() {
1353     return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
1354   }
1355   size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }
1356   bool in_stack_reserved_zone(address a) {
1357     return (a <= stack_reserved_zone_base()) && (a >= (address)((intptr_t)stack_reserved_zone_base() - stack_reserved_zone_size()));
1358   }
1359   bool in_stack_yellow_zone(address a) {
1360     return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
1361   }
1362   bool in_stack_red_zone(address a) {
1363     return (a <= stack_red_zone_base()) &&
1364            (a >= (address)((intptr_t)stack_base() - stack_size()));
1365   }
1366 
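
For orientation, the zones computed above sit at the low (overflow) end of the stack: the red zone at the very bottom, the yellow zone above it, and with this change the reserved zone folded into the top of the yellow area (stack_reserved_zone_base() == stack_yellow_zone_base(), and the reserved size is included in stack_yellow_zone_size()). A worked example with made-up numbers, just to show where the bases land:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed values, purely for illustration.
      const uintptr_t page        = 4096;            // os::vm_page_size()
      const uintptr_t stack_base  = 0x7f0010000000;  // top of the stack (grows down)
      const uintptr_t stack_size  = 8 * page;
      const uintptr_t red_size    = 1 * page;            // StackRedPages (assumed)
      const uintptr_t yellow_size = 2 * page + 1 * page; // Yellow + Reserved pages (assumed)

      const uintptr_t bottom      = stack_base - stack_size;
      const uintptr_t red_base    = stack_base - (stack_size - red_size);
      const uintptr_t yellow_base = stack_base - (stack_size - (red_size + yellow_size));

      printf("stack bottom           %#lx\n", (unsigned long)bottom);
      printf("stack_red_zone_base    %#lx\n", (unsigned long)red_base);
      printf("stack_yellow_zone_base %#lx (== stack_reserved_zone_base)\n",
             (unsigned long)yellow_base);
      // in_stack_red_zone(a):    bottom   <= a <= red_base
      // in_stack_yellow_zone(a): red_base <= a <= yellow_base
      return 0;
    }
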
1367   void create_stack_guard_pages();
1368   void remove_stack_guard_pages();
1369 
1370   void enable_stack_reserved_zone();
1371   void disable_stack_reserved_zone();
1372   void enable_stack_yellow_zone();
1373   void disable_stack_yellow_zone();
1374   void enable_stack_red_zone();
1375   void disable_stack_red_zone();
1376 
1377   inline bool stack_guard_zone_unused();
1378   inline bool stack_yellow_zone_disabled();
1379   inline bool stack_reserved_zone_disabled();
1380   inline bool stack_guards_enabled();
1381   
1382   intptr_t* reserved_stack_activation() const { return _reserved_stack_activation; }
1383   void      set_reserved_stack_activation(intptr_t* addr) {
1384     assert(_reserved_stack_activation == (intptr_t*)stack_base()
1385             || _reserved_stack_activation == NULL
1386             || addr == (intptr_t*)stack_base(), "Must not be set twice");
1387     _reserved_stack_activation = addr; 
1388   }
1389 
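
The assert above enforces a simple discipline: the field normally holds stack_base() as a sentinel, is set to a real activation at most once (when an overflow is detected in the reserved zone), and is put back to the sentinel once that activation has been dealt with. A standalone sketch of that discipline; the class and the arm/disarm names are illustrative only:

    #include <cassert>
    #include <cstdint>

    typedef intptr_t* activation_t;

    class ReservedActivationSketch {
      activation_t _stack_base;   // sentinel meaning "nothing pending"
      activation_t _activation;   // models _reserved_stack_activation
     public:
      explicit ReservedActivationSketch(activation_t base)
        : _stack_base(base), _activation(base) {}

      // Record the frame that should eventually see the delayed overflow error.
      void arm(activation_t frame_sp) {
        assert(_activation == _stack_base && "must not be set twice");
        _activation = frame_sp;
      }

      // Back to the sentinel once that frame has been unwound.
      void disarm() { _activation = _stack_base; }

      bool pending() const { return _activation != _stack_base; }
    };
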
1390   // Attempt to reguard the stack after a stack overflow may have occurred.
1391   // Returns true if (a) guard pages are not needed on this thread, (b) the
1392   // pages are already guarded, or (c) the pages were successfully reguarded.
1393   // Returns false if there is not enough stack space to reguard the pages, in
1394   // which case the caller should unwind a frame and try again.  The argument
1395   // should be the caller's (approximate) sp.
1396   bool reguard_stack(address cur_sp);
1397   // Similar to above, but checks whether the current stack pointer is out of the guard area
1398   // and reguard if possible.
1399   bool reguard_stack(void);
1400 
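
The contract documented above (a false return means: unwind one frame and try again) implies a retry loop in the code that handles the overflow. A self-contained toy model of that loop follows; reguard_stack here is a stand-in that simply succeeds once sp has moved back above a pretend guard area, not the real JavaThread method:

    #include <cstdint>
    #include <cstdio>

    typedef uint8_t* address;

    // Stand-in: reguarding succeeds only once sp is back above the guard area.
    static bool reguard_stack(address cur_sp, address guard_top) {
      return cur_sp >= guard_top;
    }

    int main() {
      uint8_t stack[16 * 1024];
      address guard_top = stack + 4 * 1024;   // pretend the guard zone ends here
      address sp        = stack + 1 * 1024;   // deep inside the (former) guard area
      const size_t frame_size = 2 * 1024;     // pretend each unwound frame frees 2 KB

      // Caller contract: if reguarding fails, unwind a frame and try again.
      while (!reguard_stack(sp, guard_top)) {
        sp += frame_size;                     // "unwind" one frame
      }
      printf("reguarded with sp at offset %ld\n", (long)(sp - stack));
      return 0;
    }
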
1401   address stack_overflow_limit() { return _stack_overflow_limit; }
1402   void set_stack_overflow_limit() {
1403     _stack_overflow_limit = _stack_base - _stack_size +
1404                             ((StackShadowPages +
1405                               StackReservedPages +
1406                               StackYellowPages +
1407                               StackRedPages) * os::vm_page_size());
1408   }
1409 
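
With the new StackReservedPages term added, the limit ends up (StackShadowPages + StackReservedPages + StackYellowPages + StackRedPages) pages above the stack bottom. A worked example with assumed flag values (the real defaults are platform dependent):

    #include <cstdio>

    int main() {
      // Assumed, platform-dependent values, purely for illustration.
      const long shadow_pages   = 20;   // StackShadowPages (assumed)
      const long reserved_pages = 1;    // StackReservedPages (assumed)
      const long yellow_pages   = 2;    // StackYellowPages (assumed)
      const long red_pages      = 1;    // StackRedPages (assumed)
      const long page_size      = 4096; // os::vm_page_size() (assumed)

      long offset = (shadow_pages + reserved_pages + yellow_pages + red_pages) * page_size;
      // _stack_overflow_limit = stack_bottom + offset
      printf("limit sits %ld bytes (%ld pages) above the stack bottom\n",
             offset, offset / page_size);
      return 0;
    }
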
1410   // Misc. accessors/mutators
1411   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
1412   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
1413   bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
1414 
1415 #ifndef PRODUCT
1416   void record_jump(address target, address instr, const char* file, int line);
1417 #endif // PRODUCT
1418 
1419   // For assembly stub generation
1420   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
1421 #ifndef PRODUCT
1422   static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
1423   static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
1424 #endif // PRODUCT
1425   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }


1435   static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target); }
1436   static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result); }
1437   static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2); }
1438   static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state); }
1439   static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc); }
1440   static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread); }
1441 #if INCLUDE_JVMCI
1442   static ByteSize pending_deoptimization_offset() { return byte_offset_of(JavaThread, _pending_deoptimization); }
1443   static ByteSize pending_monitorenter_offset()  { return byte_offset_of(JavaThread, _pending_monitorenter); }
1444   static ByteSize pending_failed_speculation_offset() { return byte_offset_of(JavaThread, _pending_failed_speculation); }
1445   static ByteSize jvmci_alternate_call_target_offset() { return byte_offset_of(JavaThread, _jvmci._alternate_call_target); }
1446   static ByteSize jvmci_implicit_exception_pc_offset() { return byte_offset_of(JavaThread, _jvmci._implicit_exception_pc); }
1447   static ByteSize jvmci_counters_offset()        { return byte_offset_of(JavaThread, _jvmci_counters); }
1448 #endif // INCLUDE_JVMCI
1449   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop); }
1450   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc); }
1451   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
1452   static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
1453   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
1454   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state); }
1455   static ByteSize reserved_stack_activation_offset() { return byte_offset_of(JavaThread, _reserved_stack_activation); }
1456   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags); }
1457 
1458   static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }
1459   static ByteSize should_post_on_exceptions_flag_offset() {
1460     return byte_offset_of(JavaThread, _should_post_on_exceptions_flag);
1461   }
1462 
1463 #if INCLUDE_ALL_GCS
1464   static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
1465   static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }
1466 #endif // INCLUDE_ALL_GCS
1467 
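
These ByteSize accessors exist so that generated stubs can address JavaThread fields as [thread_register + offset]; in C++ terms the same computation is just the base pointer plus in_bytes(offset). A standalone sketch of the idea against a stand-in struct (FakeThread is not the real JavaThread layout):

    #include <cstddef>
    #include <cstdio>

    // Stand-in for a couple of JavaThread fields.
    struct FakeThread {
      int   _thread_state;
      void* _vm_result;
    };

    // Stand-in for byte_offset_of(JavaThread, _thread_state).
    static const size_t thread_state_offset = offsetof(FakeThread, _thread_state);

    int main() {
      FakeThread t = { 7, NULL };
      // Generated code does the equivalent of: load [thread_reg + offset].
      int state = *(const int*)((const char*)&t + thread_state_offset);
      printf("state = %d\n", state);
      return 0;
    }
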
1468   // Returns the jni environment for this thread
1469   JNIEnv* jni_environment()                      { return &_jni_environment; }
1470 
1471   static JavaThread* thread_from_jni_environment(JNIEnv* env) {
1472     JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
1473     // Only return NULL if thread is off the thread list; starting to
1474     // exit should not return NULL.
1475     if (thread_from_jni_env->is_terminated()) {