src/share/vm/runtime/thread.hpp

Print this page
rev 6521 : 8044775: Improve usage of umbrella header atomic.inline.hpp.
Reviewed-by: stefank, kvn


 326   // Returns the current thread
 327   static inline Thread* current();
 328 
 329   // Common thread operations
 330   static void set_priority(Thread* thread, ThreadPriority priority);
 331   static ThreadPriority get_priority(const Thread* const thread);
 332   static void start(Thread* thread);
 333   static void interrupt(Thread* thr);
 334   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 335 
       // Sets the OS-level name of this thread; the assert restricts it to
       // being called on the thread itself (the OS API names "self").
 336   void set_native_thread_name(const char *name) {
 337     assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
 338     os::set_native_thread_name(name);
 339   }
 340 
       // Raw address of the omInUseList field (cast to ObjectMonitor**) —
       // presumably consumed by monitor list management elsewhere; confirm at call sites.
 341   ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
 342   Monitor* SR_lock() const                       { return _SR_lock; }
 343 
       // True iff the _has_async_exception bit is currently set in _suspend_flags.
 344   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
       // Atomically ORs flag f into _suspend_flags: re-read the current value
       // and retry the cmpxchg until no other thread raced us. The sizeof
       // assert guards the uint32_t <-> jint reinterpretation casts below.
 346   void set_suspend_flag(SuspendFlags f) {
 347     assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
 348     uint32_t flags;
 349     do {
 350       flags = _suspend_flags;
 351     }
 352     while (Atomic::cmpxchg((jint)(flags | f),
 353                            (volatile jint*)&_suspend_flags,
 354                            (jint)flags) != (jint)flags);
 355   }
       // Atomically clears (ANDs out) flag f from _suspend_flags using the
       // same compare-and-exchange retry loop as set_suspend_flag().
 356   void clear_suspend_flag(SuspendFlags f) {
 357     assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
 358     uint32_t flags;
 359     do {
 360       flags = _suspend_flags;
 361     }
 362     while (Atomic::cmpxchg((jint)(flags & ~f),
 363                            (volatile jint*)&_suspend_flags,
 364                            (jint)flags) != (jint)flags);
 365   }
 366 
       // Thin wrappers that flip individual bits via the atomic
       // set_suspend_flag()/clear_suspend_flag() helpers.
 367   void set_has_async_exception() {
 368     set_suspend_flag(_has_async_exception);
 369   }
 370   void clear_has_async_exception() {
 371     clear_suspend_flag(_has_async_exception);
 372   }
 373 
       // True iff the _critical_native_unlock bit is set in _suspend_flags.
 374   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 375 
 376   void set_critical_native_unlock() {
 377     set_suspend_flag(_critical_native_unlock);
 378   }
 379   void clear_critical_native_unlock() {
 380     clear_suspend_flag(_critical_native_unlock);
 381   }
 382 
 383   // Support for Unhandled Oop detection
       // Debug-build-only facility (compiled in only when CHECK_UNHANDLED_OOPS
       // is defined) that records raw oops so GC-unsafe usage can be caught.
 384 #ifdef CHECK_UNHANDLED_OOPS
 385  private:
 386   UnhandledOops* _unhandled_oops;
 387  public:
 388   UnhandledOops* unhandled_oops() { return _unhandled_oops; }
 389   // Mark oop safe for gc.  It may be stack allocated but won't move.
 390   void allow_unhandled_oop(oop *op) {
 391     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
 392   }
 393   // Clear oops at safepoint so crashes point to unhandled oop violator
 394   void clear_unhandled_oops() {
 395     if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
 396   }
 397 #endif // CHECK_UNHANDLED_OOPS
 398 
       // NOTE(review): this excerpt ends inside the #ifndef PRODUCT region;
       // the matching #endif lies outside the visible lines.
 399 #ifndef PRODUCT
 400   bool skip_gcalot()           { return _skip_gcalot; }
 401   void set_skip_gcalot(bool v) { _skip_gcalot = v;    }


1054   void block_if_vm_exited();
1055 
       // Flag accessors used around unsafe-memory accesses and monitor unlock
       // suppression; plain (non-atomic) reads/writes of the backing fields.
1056   bool doing_unsafe_access()                     { return _doing_unsafe_access; }
1057   void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }
1058 
1059   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
1060   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
1061 
1062 #if INCLUDE_NMT
1063   // native memory tracking
       // _recorder is declared volatile; get_recorder() casts that qualifier
       // away for the caller.
1064   inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
1065   inline void         set_recorder(MemRecorder* rc) { _recorder = rc; }
1066 
1067  private:
1068   // per-thread memory recorder
1069   MemRecorder* volatile _recorder;
1070 #endif // INCLUDE_NMT
1071 
1072   // Suspend/resume support for JavaThread
1073  private:
       // Private helpers flip the _ext_suspended bit via the atomic
       // suspend-flag operations.
1074   void set_ext_suspended()       { set_suspend_flag (_ext_suspended);  }
1075   void clear_ext_suspended()     { clear_suspend_flag(_ext_suspended); }
1076 
1077  public:
1078   void java_suspend();
1079   void java_resume();
1080   int  java_suspend_self();
       // Self-suspend loop: after java_suspend_self() returns we re-check,
       // because the thread may have been suspended again while blocked.
       // set_suspend_equivalent() re-arms the suspend-equivalent state for the
       // next iteration (its exact semantics are defined elsewhere — confirm).
1082   void check_and_wait_while_suspended() {
1083     assert(JavaThread::current() == this, "sanity check");
1084 
1085     bool do_self_suspend;
1086     do {
1087       // were we externally suspended while we were waiting?
1088       do_self_suspend = handle_special_suspend_equivalent_condition();
1089       if (do_self_suspend) {
1090         // don't surprise the thread that suspended us by returning
1091         java_suspend_self();
1092         set_suspend_equivalent();
1093       }
1094     } while (do_self_suspend);
1095   }


1103   static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);
1104 
1105   bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
       // Convenience form that acquires SR_lock (without safepoint check)
       // before delegating to is_ext_suspend_completed().
1106   bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
1107     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1108     // Warning: is_ext_suspend_completed() may temporarily drop the
1109     // SR_lock to allow the thread to reach a stable thread state if
1110     // it is currently in a transient thread state.
1111     return is_ext_suspend_completed(false /*!called_by_wait */,
1112                                     SuspendRetryDelay, bits);
1113   }
1114 
1115   // We cannot allow wait_for_ext_suspend_completion() to run forever or
1116   // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
1117   // passed as the count and delay parameters. Experiments with specific
1118   // calls to wait_for_ext_suspend_completion() can be done by passing
1119   // other values in the code. Experiments with all calls can be done
1120   // via the appropriate -XX options.
1121   bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
1122 
       // Setters/clearers for the external/deopt suspend bits; atomic via the
       // set_suspend_flag()/clear_suspend_flag() CAS helpers.
1123   void set_external_suspend()     { set_suspend_flag  (_external_suspend); }
1124   void clear_external_suspend()   { clear_suspend_flag(_external_suspend); }
1125 
1126   void set_deopt_suspend()        { set_suspend_flag  (_deopt_suspend); }
1127   void clear_deopt_suspend()      { clear_suspend_flag(_deopt_suspend); }
1128   bool is_deopt_suspend()         { return (_suspend_flags & _deopt_suspend) != 0; }
1129 
1130   bool is_external_suspend() const {
1131     return (_suspend_flags & _external_suspend) != 0;
1132   }
1133   // Whenever a thread transitions from native to vm/java it must suspend
1134   // if external|deopt suspend is present.
1135   bool is_suspend_after_native() const {
1136     return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
1137   }
1138 
1139   // external suspend request is completed
1140   bool is_ext_suspended() const {
1141     return (_suspend_flags & _ext_suspended) != 0;
1142   }
1143 
       // Same check as is_external_suspend(), but taken under SR_lock so a
       // concurrent request/drop cannot be missed.
1144   bool is_external_suspend_with_lock() const {
1145     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1146     return is_external_suspend();
1147   }


1195 
1196   // Return true if JavaThread has an asynchronous condition or
1197   // if external suspension is requested.
1198   bool has_special_runtime_exit_condition() {
1199     // We call is_external_suspend() last since external suspend should
1200     // be less common. Because we don't use is_external_suspend_with_lock
1201     // it is possible that we won't see an asynchronous external suspend
1202     // request that has just gotten started, i.e., SR_lock grabbed but
1203     // _external_suspend field change either not made yet or not visible
1204     // yet. However, this is okay because the request is asynchronous and
1205     // we will see the new flag value the next time through. It's also
1206     // possible that the external suspend request is dropped after
1207     // we have checked is_external_suspend(), we will recheck its value
1208     // under SR_lock in java_suspend_self().
1209     return (_special_runtime_exit_condition != _no_async_condition) ||
1210             is_external_suspend() || is_deopt_suspend();
1211   }
1212 
1213   void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }
1214 
       // Records the pending async exception oop and condition, then raises
       // the _has_async_exception suspend-flag bit (flag set last, after the
       // oop and condition fields are written).
1215   void set_pending_async_exception(oop e) {
1216     _pending_async_exception = e;
1217     _special_runtime_exit_condition = _async_exception;
1218     set_has_async_exception();
1219   }
1220 
1221   // Fast-locking support
1222   bool is_lock_owned(address adr) const;
1223 
1224   // Accessors for vframe array top
1225   // The linked list of vframe arrays are sorted on sp. This means when we
1226   // unpack the head must contain the vframe array to unpack.
1227   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
1228   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
1229 
1230   // Side structure for deferring update of java frame locals until deopt occurs
1231   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
1232   void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
1233 
1234   // These only really exist to make debugging deopt problems simpler
1235 
1236   void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
1237   vframeArray* vframe_array_last() const         { return _vframe_array_last;  }
1238 
1239   // The special resourceMark used during deoptimization




 326   // Returns the current thread
 327   static inline Thread* current();
 328 
 329   // Common thread operations
 330   static void set_priority(Thread* thread, ThreadPriority priority);
 331   static ThreadPriority get_priority(const Thread* const thread);
 332   static void start(Thread* thread);
 333   static void interrupt(Thread* thr);
 334   static bool is_interrupted(Thread* thr, bool clear_interrupted);
 335 
       // Sets the OS-level name of this thread; asserts it runs on the thread itself.
 336   void set_native_thread_name(const char *name) {
 337     assert(Thread::current() == this, "set_native_thread_name can only be called on the current thread");
 338     os::set_native_thread_name(name);
 339   }
 340 
 341   ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
 342   Monitor* SR_lock() const                       { return _SR_lock; }
 343 
 344   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
 345 
       // NOTE(review): bodies moved out of line in this revision — presumably
       // to thread.inline.hpp so this header no longer needs the
       // atomic.inline.hpp umbrella (confirm against the full changeset).
 346   inline void set_suspend_flag(SuspendFlags f);
 347   inline void clear_suspend_flag(SuspendFlags f);


















 348 
       // NOTE(review): these wrappers lost their inline bodies in this
       // revision — presumably now defined in thread.inline.hpp; confirm.
 349   inline void set_has_async_exception();
 350   inline void clear_has_async_exception();




 351 
 352   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 353 
 354   inline void set_critical_native_unlock();
 355   inline void clear_critical_native_unlock();




 356 
 357   // Support for Unhandled Oop detection
       // Debug-build-only facility (compiled in only when CHECK_UNHANDLED_OOPS
       // is defined) that records raw oops so GC-unsafe usage can be caught.
 358 #ifdef CHECK_UNHANDLED_OOPS
 359  private:
 360   UnhandledOops* _unhandled_oops;
 361  public:
 362   UnhandledOops* unhandled_oops() { return _unhandled_oops; }
 363   // Mark oop safe for gc.  It may be stack allocated but won't move.
 364   void allow_unhandled_oop(oop *op) {
 365     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
 366   }
 367   // Clear oops at safepoint so crashes point to unhandled oop violator
 368   void clear_unhandled_oops() {
 369     if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops();
 370   }
 371 #endif // CHECK_UNHANDLED_OOPS
 372 
       // NOTE(review): excerpt ends inside this #ifndef PRODUCT region; the
       // matching #endif is outside the visible lines.
 373 #ifndef PRODUCT
 374   bool skip_gcalot()           { return _skip_gcalot; }
 375   void set_skip_gcalot(bool v) { _skip_gcalot = v;    }


1028   void block_if_vm_exited();
1029 
1030   bool doing_unsafe_access()                     { return _doing_unsafe_access; }
1031   void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }
1032 
1033   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
1034   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
1035 
1036 #if INCLUDE_NMT
1037   // native memory tracking
       // _recorder is declared volatile; get_recorder() casts the qualifier away.
1038   inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
1039   inline void         set_recorder(MemRecorder* rc) { _recorder = rc; }
1040 
1041  private:
1042   // per-thread memory recorder
1043   MemRecorder* volatile _recorder;
1044 #endif // INCLUDE_NMT
1045 
1046   // Suspend/resume support for JavaThread
1047  private:
       // NOTE(review): previously defined inline here; bodies moved out of
       // line in this revision (presumably to thread.inline.hpp — confirm).
1048   inline void set_ext_suspended();
1049   inline void clear_ext_suspended();
1050 
1051  public:
1052   void java_suspend();
1053   void java_resume();
1054   int  java_suspend_self();
1055 
       // Self-suspend loop: after java_suspend_self() returns we re-check,
       // because the thread may have been suspended again while blocked.
1056   void check_and_wait_while_suspended() {
1057     assert(JavaThread::current() == this, "sanity check");
1058 
1059     bool do_self_suspend;
1060     do {
1061       // were we externally suspended while we were waiting?
1062       do_self_suspend = handle_special_suspend_equivalent_condition();
1063       if (do_self_suspend) {
1064         // don't surprise the thread that suspended us by returning
1065         java_suspend_self();
1066         set_suspend_equivalent();
1067       }
1068     } while (do_self_suspend);
1069   }


1077   static void check_special_condition_for_native_trans_and_transition(JavaThread *thread);
1078 
1079   bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
       // Convenience form that acquires SR_lock (without safepoint check)
       // before delegating to is_ext_suspend_completed().
1080   bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
1081     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1082     // Warning: is_ext_suspend_completed() may temporarily drop the
1083     // SR_lock to allow the thread to reach a stable thread state if
1084     // it is currently in a transient thread state.
1085     return is_ext_suspend_completed(false /*!called_by_wait */,
1086                                     SuspendRetryDelay, bits);
1087   }
1088 
1089   // We cannot allow wait_for_ext_suspend_completion() to run forever or
1090   // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
1091   // passed as the count and delay parameters. Experiments with specific
1092   // calls to wait_for_ext_suspend_completion() can be done by passing
1093   // other values in the code. Experiments with all calls can be done
1094   // via the appropriate -XX options.
1095   bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
1096 
       // NOTE(review): these setters/clearers lost their inline bodies in this
       // revision — presumably now in thread.inline.hpp; confirm.
1097   inline void set_external_suspend();
1098   inline void clear_external_suspend();
1099 
1100   inline void set_deopt_suspend();
1101   inline void clear_deopt_suspend();
1102   bool is_deopt_suspend()         { return (_suspend_flags & _deopt_suspend) != 0; }
1103 
1104   bool is_external_suspend() const {
1105     return (_suspend_flags & _external_suspend) != 0;
1106   }
1107   // Whenever a thread transitions from native to vm/java it must suspend
1108   // if external|deopt suspend is present.
1109   bool is_suspend_after_native() const {
1110     return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
1111   }
1112 
1113   // external suspend request is completed
1114   bool is_ext_suspended() const {
1115     return (_suspend_flags & _ext_suspended) != 0;
1116   }
1117 
       // Same check as is_external_suspend(), but taken under SR_lock so a
       // concurrent request/drop cannot be missed.
1118   bool is_external_suspend_with_lock() const {
1119     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1120     return is_external_suspend();
1121   }


1169 
1170   // Return true if JavaThread has an asynchronous condition or
1171   // if external suspension is requested.
1172   bool has_special_runtime_exit_condition() {
1173     // We call is_external_suspend() last since external suspend should
1174     // be less common. Because we don't use is_external_suspend_with_lock
1175     // it is possible that we won't see an asynchronous external suspend
1176     // request that has just gotten started, i.e., SR_lock grabbed but
1177     // _external_suspend field change either not made yet or not visible
1178     // yet. However, this is okay because the request is asynchronous and
1179     // we will see the new flag value the next time through. It's also
1180     // possible that the external suspend request is dropped after
1181     // we have checked is_external_suspend(), we will recheck its value
1182     // under SR_lock in java_suspend_self().
1183     return (_special_runtime_exit_condition != _no_async_condition) ||
1184             is_external_suspend() || is_deopt_suspend();
1185   }
1186 
1187   void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }
1188 
       // NOTE(review): body moved out of line in this revision — presumably
       // now in thread.inline.hpp; confirm against the full changeset.
1189   inline void set_pending_async_exception(oop e);




1190 
1191   // Fast-locking support
1192   bool is_lock_owned(address adr) const;
1193 
1194   // Accessors for vframe array top
1195   // The linked list of vframe arrays are sorted on sp. This means when we
1196   // unpack the head must contain the vframe array to unpack.
1197   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
1198   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
1199 
1200   // Side structure for deferring update of java frame locals until deopt occurs
1201   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
1202   void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
1203 
1204   // These only really exist to make debugging deopt problems simpler
1205 
1206   void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
1207   vframeArray* vframe_array_last() const         { return _vframe_array_last;  }
1208 
1209   // The special resourceMark used during deoptimization