
src/hotspot/share/runtime/thread.hpp

rev 56101 : 8227745: Enable Escape Analysis for better performance when debugging
Reviewed-by: ???


 274   // osThread.hpp).
 275   //
 276   // 2. It would be more natural if set_external_suspend() were private and
 277   // part of java_suspend(), but that would probably affect suspend/query
 278   // performance. This needs more investigation.
 279 
 280   // suspend/resume lock: used for self-suspend
 281   Monitor* _SR_lock;
 282 
 283  protected:
 284   enum SuspendFlags {
 285     // NOTE: avoid using the sign-bit as cc generates different test code
 286     //       when the sign-bit is used, and sometimes incorrectly - see CR 6398077
 287 
 288     _external_suspend       = 0x20000000U, // thread is asked to self suspend
 289     _ext_suspended          = 0x40000000U, // thread has self-suspended
 290 
 291     _has_async_exception    = 0x00000001U, // there is a pending async exception
 292     _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock
 293 
 294     _trace_flag             = 0x00000004U  // call tracing backend
 295   };
 296 
 297   // various suspension related flags - atomically updated
 298   // overloaded for async exception checking in check_special_condition_for_native_trans.
 299   volatile uint32_t _suspend_flags;
 300 
 301  private:
 302   int _num_nested_signal;
 303 
 304   DEBUG_ONLY(bool _suspendible_thread;)
 305 
 306  public:
 307   void enter_signal_handler() { _num_nested_signal++; }
 308   void leave_signal_handler() { _num_nested_signal--; }
 309   bool is_inside_signal_handler() const { return _num_nested_signal > 0; }
 310 
 311   // Determines if a heap allocation failure will be retried
 312   // (e.g., by deoptimizing and re-executing in the interpreter).
 313   // In this case, the failed allocation must raise
 314   // Universe::out_of_memory_error_retry() and omit side effects


 524 
 525   ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
 526   Monitor* SR_lock() const                       { return _SR_lock; }
 527 
 528   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
 529 
 530   inline void set_suspend_flag(SuspendFlags f);
 531   inline void clear_suspend_flag(SuspendFlags f);
 532 
 533   inline void set_has_async_exception();
 534   inline void clear_has_async_exception();
 535 
 536   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 537 
 538   inline void set_critical_native_unlock();
 539   inline void clear_critical_native_unlock();
 540 
 541   inline void set_trace_flag();
 542   inline void clear_trace_flag();
 543 
 544   // Support for Unhandled Oop detection
 545   // Add the field for both fastdebug and debug builds to keep
 546   // Thread's fields layout the same.
 547   // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug build.
 548 #ifdef CHECK_UNHANDLED_OOPS
 549  private:
 550   UnhandledOops* _unhandled_oops;
 551 #elif defined(ASSERT)
 552  private:
 553   void* _unhandled_oops;
 554 #endif
 555 #ifdef CHECK_UNHANDLED_OOPS
 556  public:
 557   UnhandledOops* unhandled_oops() { return _unhandled_oops; }
 558   // Mark oop safe for gc.  It may be stack allocated but won't move.
 559   void allow_unhandled_oop(oop *op) {
 560     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
 561   }
 562   // Clear oops at safepoint so crashes point to unhandled oop violator
 563   void clear_unhandled_oops() {


 598   ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
 599   void initialize_tlab() {
 600     if (UseTLAB) {
 601       tlab().initialize();
 602     }
 603   }
 604 
 605   jlong allocated_bytes()               { return _allocated_bytes; }
 606   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
 607   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
 608   inline jlong cooked_allocated_bytes();
 609 
 610   ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }
 611 
 612   ThreadStatisticalInfo& statistical_info() { return _statistical_info; }
 613 
 614   JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
 615 
 616   bool is_trace_suspend()               { return (_suspend_flags & _trace_flag) != 0; }
 617 
 618   // VM operation support
 619   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
 620   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
 621   void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
 622 
 623   // For tracking the heavyweight monitor the thread is pending on.
 624   ObjectMonitor* current_pending_monitor() {
 625     return _current_pending_monitor;
 626   }
 627   void set_current_pending_monitor(ObjectMonitor* monitor) {
 628     _current_pending_monitor = monitor;
 629   }
 630   void set_current_pending_monitor_is_from_java(bool from_java) {
 631     _current_pending_monitor_is_from_java = from_java;
 632   }
 633   bool current_pending_monitor_is_from_java() {
 634     return _current_pending_monitor_is_from_java;
 635   }
 636 
 637   // For tracking the ObjectMonitor on which this thread called Object.wait()


 958   void unpark();
 959 
 960   // Returns the single instance of WatcherThread
 961   static WatcherThread* watcher_thread()         { return _watcher_thread; }
 962 
 963   // Create and start the single instance of WatcherThread, or stop it on shutdown
 964   static void start();
 965   static void stop();
 966   // Only allow start once the VM is sufficiently initialized
 967   // Otherwise the first task to enroll will trigger the start
 968   static void make_startable();
 969  private:
 970   int sleep() const;
 971 };
 972 
 973 
 974 class CompilerThread;
 975 
 976 typedef void (*ThreadFunction)(JavaThread*, TRAPS);
 977 
 978 class JavaThread: public Thread {
 979   friend class VMStructs;
 980   friend class JVMCIVMStructs;
 981   friend class WhiteBox;
 982  private:
 983   bool           _on_thread_list;                // Is set when this JavaThread is added to the Threads list
 984   oop            _threadObj;                     // The Java level thread object
 985 
 986 #ifdef ASSERT
 987  private:
 988   int _java_call_counter;
 989 
 990  public:
 991   int  java_call_counter()                       { return _java_call_counter; }
 992   void inc_java_call_counter()                   { _java_call_counter++; }
 993   void dec_java_call_counter() {
 994     assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
 995     _java_call_counter--;
 996   }
 997  private:  // restore original namespace restriction


1000 #ifndef PRODUCT
1001  public:
1002   enum {
1003     jump_ring_buffer_size = 16
1004   };
1005  private:  // restore original namespace restriction
1006 #endif
1007 
 1008   JavaFrameAnchor _anchor;                       // Encapsulation of current java frame and its state
1009 
1010   ThreadFunction _entry_point;
1011 
1012   JNIEnv        _jni_environment;
1013 
1014   // Deopt support
1015   DeoptResourceMark*  _deopt_mark;               // Holds special ResourceMark for deoptimization
1016 
1017   CompiledMethod*       _deopt_nmethod;         // CompiledMethod that is currently being deoptimized
1018   vframeArray*  _vframe_array_head;              // Holds the heap of the active vframeArrays
1019   vframeArray*  _vframe_array_last;              // Holds last vFrameArray we popped
1020   // Because deoptimization is lazy we must save jvmti requests to set locals
1021   // in compiled frames until we deoptimize and we have an interpreter frame.
1022   // This holds the pointer to array (yeah like there might be more than one) of
1023   // description of compiled vframes that have locals that need to be updated.
1024   GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;
1025 
1026   // Handshake value for fixing 6243940. We need a place for the i2c
1027   // adapter to store the callee Method*. This value is NEVER live
1028   // across a gc point so it does NOT have to be gc'd
1029   // The handshake is open ended since we can't be certain that it will
1030   // be NULLed. This is because we rarely ever see the race and end up
1031   // in handle_wrong_method which is the backend of the handshake. See
1032   // code in i2c adapters and handle_wrong_method.
1033 
1034   Method*       _callee_target;
1035 
1036   // Used to pass back results to the interpreter or generated code running Java code.
1037   oop           _vm_result;    // oop result is GC-preserved
1038   Metadata*     _vm_result_2;  // non-oop result
1039 
1040   // See ReduceInitialCardMarks: this holds the precise space interval of
1041   // the most recent slow path allocation for which compiled code has
1042   // elided card-marks for performance along the fast-path.
1043   MemRegion     _deferred_card_mark;
1044 


1315     _handshake.set_operation(this, op);
1316   }
1317 
1318   bool has_handshake() const {
1319     return _handshake.has_operation();
1320   }
1321 
1322   void handshake_process_by_self() {
1323     _handshake.process_by_self(this);
1324   }
1325 
1326   void handshake_process_by_vmthread() {
1327     _handshake.process_by_vmthread(this);
1328   }
1329 
1330   // Suspend/resume support for JavaThread
1331  private:
1332   inline void set_ext_suspended();
1333   inline void clear_ext_suspended();
1334 
1335  public:
1336   void java_suspend(); // higher-level suspension logic called by the public APIs
1337   void java_resume();  // higher-level resume logic called by the public APIs
1338   int  java_suspend_self(); // low-level self-suspension mechanics
1339 
1340  private:
1341   // mid-level wrapper around java_suspend_self to set up correct state and
1342   // check for a pending safepoint at the end
1343   void java_suspend_self_with_safepoint_check();
1344 
1345  public:
1346   void check_and_wait_while_suspended() {
1347     assert(JavaThread::current() == this, "sanity check");
1348 
1349     bool do_self_suspend;
1350     do {
1351       // were we externally suspended while we were waiting?
1352       do_self_suspend = handle_special_suspend_equivalent_condition();
1353       if (do_self_suspend) {
1354         // don't surprise the thread that suspended us by returning


1379   // We cannot allow wait_for_ext_suspend_completion() to run forever or
1380   // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
1381   // passed as the count and delay parameters. Experiments with specific
1382   // calls to wait_for_ext_suspend_completion() can be done by passing
1383   // other values in the code. Experiments with all calls can be done
1384   // via the appropriate -XX options.
1385   bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
1386 
1387   // test for suspend - most (all?) of these should go away
1388   bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits);
1389 
1390   inline void set_external_suspend();
1391   inline void clear_external_suspend();
1392 
1393   bool is_external_suspend() const {
1394     return (_suspend_flags & _external_suspend) != 0;
1395   }
1396   // Whenever a thread transitions from native to vm/java it must suspend
1397   // if external|deopt suspend is present.
1398   bool is_suspend_after_native() const {
1399     return (_suspend_flags & (_external_suspend JFR_ONLY(| _trace_flag))) != 0;
1400   }
1401 
1402   // external suspend request is completed
1403   bool is_ext_suspended() const {
1404     return (_suspend_flags & _ext_suspended) != 0;
1405   }
1406 
1407   bool is_external_suspend_with_lock() const {
1408     MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1409     return is_external_suspend();
1410   }
1411 
1412   // Special method to handle a pending external suspend request
1413   // when a suspend equivalent condition lifts.
1414   bool handle_special_suspend_equivalent_condition() {
1415     assert(is_suspend_equivalent(),
1416            "should only be called in a suspend equivalence condition");
1417     MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1418     bool ret = is_external_suspend();
1419     if (!ret) {


1452   bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }
1453 
1454   void check_and_handle_async_exceptions(bool check_unsafe_error = true);
1455 
1456   // these next two are also used for self-suspension and async exception support
1457   void handle_special_runtime_exit_condition(bool check_asyncs = true);
1458 
1459   // Return true if JavaThread has an asynchronous condition or
1460   // if external suspension is requested.
1461   bool has_special_runtime_exit_condition() {
1462     // Because we don't use is_external_suspend_with_lock
1463     // it is possible that we won't see an asynchronous external suspend
1464     // request that has just gotten started, i.e., SR_lock grabbed but
1465     // _external_suspend field change either not made yet or not visible
1466     // yet. However, this is okay because the request is asynchronous and
1467     // we will see the new flag value the next time through. It's also
1468     // possible that the external suspend request is dropped after
 1469   // we have checked is_external_suspend(); if so, we will recheck its value
1470     // under SR_lock in java_suspend_self().
1471     return (_special_runtime_exit_condition != _no_async_condition) ||
1472             is_external_suspend() || is_trace_suspend();
1473   }
1474 
1475   void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }
1476 
1477   inline void set_pending_async_exception(oop e);
1478 
1479   // Fast-locking support
1480   bool is_lock_owned(address adr) const;
1481 
1482   // Accessors for vframe array top
 1483   // The linked list of vframe arrays is sorted on sp. This means that when we
 1484   // unpack, the head must contain the vframe array to unpack.
1485   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
1486   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
1487 
1488   // Side structure for deferring update of java frame locals until deopt occurs
1489   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
1490   void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
1491 
1492   // These only really exist to make debugging deopt problems simpler
1493 
1494   void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
1495   vframeArray* vframe_array_last() const         { return _vframe_array_last;  }
1496 
1497   // The special resourceMark used during deoptimization
1498 
1499   void set_deopt_mark(DeoptResourceMark* value)  { _deopt_mark = value; }
1500   DeoptResourceMark* deopt_mark(void)            { return _deopt_mark; }
1501 
1502   void set_deopt_compiled_method(CompiledMethod* nm)  { _deopt_nmethod = nm; }
1503   CompiledMethod* deopt_compiled_method()        { return _deopt_nmethod; }
1504 
1505   Method*    callee_target() const               { return _callee_target; }
1506   void set_callee_target  (Method* x)          { _callee_target   = x; }
1507 
1508   // Oop results of vm runtime calls
1509   oop  vm_result() const                         { return _vm_result; }
1510   void set_vm_result  (oop x)                    { _vm_result   = x; }


2072 // Dedicated thread to sweep the code cache
2073 class CodeCacheSweeperThread : public JavaThread {
2074   CompiledMethod*       _scanned_compiled_method; // nmethod being scanned by the sweeper
2075  public:
2076   CodeCacheSweeperThread();
2077   // Track the nmethod currently being scanned by the sweeper
2078   void set_scanned_compiled_method(CompiledMethod* cm) {
2079     assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
2080     _scanned_compiled_method = cm;
2081   }
2082 
2083   // Hide sweeper thread from external view.
2084   bool is_hidden_from_external_view() const { return true; }
2085 
2086   bool is_Code_cache_sweeper_thread() const { return true; }
2087 
2088   // Prevent GC from unloading _scanned_compiled_method
2089   void oops_do(OopClosure* f, CodeBlobClosure* cf);
2090   void nmethods_do(CodeBlobClosure* cf);
2091 };
2092 
2093 // A thread used for Compilation.
2094 class CompilerThread : public JavaThread {
2095   friend class VMStructs;
2096  private:
2097   CompilerCounters* _counters;
2098 
2099   ciEnv*                _env;
2100   CompileLog*           _log;
2101   CompileTask* volatile _task;  // print_threads_compiling can read this concurrently.
2102   CompileQueue*         _queue;
2103   BufferBlob*           _buffer_blob;
2104 
2105   AbstractCompiler*     _compiler;
2106   TimeStamp             _idle_time;
2107 
2108  public:
2109 
2110   static CompilerThread* current();
2111 




 274   // osThread.hpp).
 275   //
 276   // 2. It would be more natural if set_external_suspend() were private and
 277   // part of java_suspend(), but that would probably affect suspend/query
 278   // performance. This needs more investigation.
 279 
 280   // suspend/resume lock: used for self-suspend
 281   Monitor* _SR_lock;
 282 
 283  protected:
 284   enum SuspendFlags {
 285     // NOTE: avoid using the sign-bit as cc generates different test code
 286     //       when the sign-bit is used, and sometimes incorrectly - see CR 6398077
 287 
 288     _external_suspend       = 0x20000000U, // thread is asked to self suspend
 289     _ext_suspended          = 0x40000000U, // thread has self-suspended
 290 
 291     _has_async_exception    = 0x00000001U, // there is a pending async exception
 292     _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock
 293 
 294     _trace_flag             = 0x00000004U, // call tracing backend
 295     _ea_obj_deopt           = 0x00000008U  // suspend for object reallocation and relocking for JVMTI agent
 296   };
 297 
 298   // various suspension related flags - atomically updated
 299   // overloaded for async exception checking in check_special_condition_for_native_trans.
 300   volatile uint32_t _suspend_flags;
 301 
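
The new _ea_obj_deopt bit plugs into the same mechanism as the existing flags: a single atomically updated word that is tested with bit masks at thread-state transitions. As a rough standalone sketch of that pattern (plain std::atomic rather than HotSpot's Atomic:: helpers, illustrative names only, not the actual set_suspend_flag()/clear_suspend_flag() implementation):

    #include <atomic>
    #include <cstdint>

    // Stand-in for the suspend-flag word; values mirror the enum above.
    enum : uint32_t {
      external_suspend = 0x20000000U,
      trace_flag       = 0x00000004U,
      ea_obj_deopt     = 0x00000008U   // object reallocation/relocking requested
    };

    struct SuspendWord {
      std::atomic<uint32_t> flags{0};

      void set(uint32_t f)          { flags.fetch_or(f, std::memory_order_acq_rel); }
      void clear(uint32_t f)        { flags.fetch_and(~f, std::memory_order_acq_rel); }
      bool is_set(uint32_t f) const { return (flags.load(std::memory_order_acquire) & f) != 0; }
    };

    // e.g. the is_ea_obj_deopt_suspend() accessor below boils down to word.is_set(ea_obj_deopt).
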
 302  private:
 303   int _num_nested_signal;
 304 
 305   DEBUG_ONLY(bool _suspendible_thread;)
 306 
 307  public:
 308   void enter_signal_handler() { _num_nested_signal++; }
 309   void leave_signal_handler() { _num_nested_signal--; }
 310   bool is_inside_signal_handler() const { return _num_nested_signal > 0; }
 311 
 312   // Determines if a heap allocation failure will be retried
 313   // (e.g., by deoptimizing and re-executing in the interpreter).
 314   // In this case, the failed allocation must raise
 315   // Universe::out_of_memory_error_retry() and omit side effects


 525 
 526   ObjectMonitor** omInUseList_addr()             { return (ObjectMonitor **)&omInUseList; }
 527   Monitor* SR_lock() const                       { return _SR_lock; }
 528 
 529   bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }
 530 
 531   inline void set_suspend_flag(SuspendFlags f);
 532   inline void clear_suspend_flag(SuspendFlags f);
 533 
 534   inline void set_has_async_exception();
 535   inline void clear_has_async_exception();
 536 
 537   bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; }
 538 
 539   inline void set_critical_native_unlock();
 540   inline void clear_critical_native_unlock();
 541 
 542   inline void set_trace_flag();
 543   inline void clear_trace_flag();
 544 
 545   inline void set_ea_obj_deopt_flag();
 546   inline void clear_ea_obj_deopt_flag();
 547 
 548   // Support for Unhandled Oop detection
 549   // Add the field for both fastdebug and debug builds to keep
 550   // Thread's fields layout the same.
 551   // Note: CHECK_UNHANDLED_OOPS is defined only for fastdebug build.
 552 #ifdef CHECK_UNHANDLED_OOPS
 553  private:
 554   UnhandledOops* _unhandled_oops;
 555 #elif defined(ASSERT)
 556  private:
 557   void* _unhandled_oops;
 558 #endif
 559 #ifdef CHECK_UNHANDLED_OOPS
 560  public:
 561   UnhandledOops* unhandled_oops() { return _unhandled_oops; }
 562   // Mark oop safe for gc.  It may be stack allocated but won't move.
 563   void allow_unhandled_oop(oop *op) {
 564     if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op);
 565   }
 566   // Clear oops at safepoint so crashes point to unhandled oop violator
 567   void clear_unhandled_oops() {


 602   ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
 603   void initialize_tlab() {
 604     if (UseTLAB) {
 605       tlab().initialize();
 606     }
 607   }
 608 
 609   jlong allocated_bytes()               { return _allocated_bytes; }
 610   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
 611   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
 612   inline jlong cooked_allocated_bytes();
 613 
 614   ThreadHeapSampler& heap_sampler()     { return _heap_sampler; }
 615 
 616   ThreadStatisticalInfo& statistical_info() { return _statistical_info; }
 617 
 618   JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
 619 
 620   bool is_trace_suspend()               { return (_suspend_flags & _trace_flag) != 0; }
 621 
 622   bool is_ea_obj_deopt_suspend()        { return (_suspend_flags & _ea_obj_deopt) != 0; }
 623 
 624   // VM operation support
 625   int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
 626   int vm_operation_completed_count()             { return _vm_operation_completed_count; }
 627   void increment_vm_operation_completed_count()  { _vm_operation_completed_count++; }
 628 
 629   // For tracking the heavyweight monitor the thread is pending on.
 630   ObjectMonitor* current_pending_monitor() {
 631     return _current_pending_monitor;
 632   }
 633   void set_current_pending_monitor(ObjectMonitor* monitor) {
 634     _current_pending_monitor = monitor;
 635   }
 636   void set_current_pending_monitor_is_from_java(bool from_java) {
 637     _current_pending_monitor_is_from_java = from_java;
 638   }
 639   bool current_pending_monitor_is_from_java() {
 640     return _current_pending_monitor_is_from_java;
 641   }
 642 
 643   // For tracking the ObjectMonitor on which this thread called Object.wait()


 964   void unpark();
 965 
 966   // Returns the single instance of WatcherThread
 967   static WatcherThread* watcher_thread()         { return _watcher_thread; }
 968 
 969   // Create and start the single instance of WatcherThread, or stop it on shutdown
 970   static void start();
 971   static void stop();
 972   // Only allow start once the VM is sufficiently initialized
 973   // Otherwise the first task to enroll will trigger the start
 974   static void make_startable();
 975  private:
 976   int sleep() const;
 977 };
 978 
 979 
 980 class CompilerThread;
 981 
 982 typedef void (*ThreadFunction)(JavaThread*, TRAPS);
 983 
 984 // Holds updates by JVMTI agents to compiled frames; these updates cannot be performed immediately.
 985 class JvmtiDeferredUpdates : public CHeapObj<mtCompiler> {
 986 
 987   // Relocking has to be deferred if the lock-owning thread is currently waiting on the monitor.
 988   int _relock_count_after_wait;
 989 
 990   // Deferred updates of locals, expressions and monitors
 991   GrowableArray<jvmtiDeferredLocalVariableSet*> _deferred_locals_updates;
 992 
 993  public:
 994   JvmtiDeferredUpdates() :
 995     _relock_count_after_wait(0),
 996     _deferred_locals_updates((ResourceObj::set_allocation_type((address) &_deferred_locals_updates,
 997                               ResourceObj::C_HEAP), 1), true, mtCompiler) { }
 998 
 999   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() { return &_deferred_locals_updates; }
1000 
1001   int get_and_reset_relock_count_after_wait() {
1002     int result = _relock_count_after_wait;
1003     _relock_count_after_wait = 0;
1004     return result;
1005   }
1006   void inc_relock_count_after_wait() {
1007     _relock_count_after_wait++;
1008   }
1009 };
1010 
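
One detail of the constructor above that is easy to misread: the initializer for _deferred_locals_updates is a comma expression. The ResourceObj::set_allocation_type(...) call runs purely for its side effect (registering the embedded GrowableArray as C-heap allocated before the array's own constructor runs), and the whole expression then evaluates to 1, which is passed as the array's initial capacity. A minimal standalone illustration of that comma-expression shape (generic C++, hypothetical names, not the HotSpot types):

    #include <cassert>

    static bool g_tagged = false;
    static void tag_storage(void* field_addr) { g_tagged = true; (void)field_addr; }

    struct Holder {
      int capacity;
      // The comma expression calls tag_storage(...) first, then yields 1,
      // which is what actually initializes 'capacity'.
      Holder() : capacity((tag_storage(&capacity), 1)) {}
    };

    int main() {
      Holder h;
      assert(g_tagged && h.capacity == 1);   // side effect ran before the value was used
      return 0;
    }
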
1011 
1012 class JavaThread: public Thread {
1013   friend class VMStructs;
1014   friend class JVMCIVMStructs;
1015   friend class WhiteBox;
1016  private:
1017   bool           _on_thread_list;                // Is set when this JavaThread is added to the Threads list
1018   oop            _threadObj;                     // The Java level thread object
1019 
1020 #ifdef ASSERT
1021  private:
1022   int _java_call_counter;
1023 
1024  public:
1025   int  java_call_counter()                       { return _java_call_counter; }
1026   void inc_java_call_counter()                   { _java_call_counter++; }
1027   void dec_java_call_counter() {
1028     assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
1029     _java_call_counter--;
1030   }
1031  private:  // restore original namespace restriction


1034 #ifndef PRODUCT
1035  public:
1036   enum {
1037     jump_ring_buffer_size = 16
1038   };
1039  private:  // restore original namespace restriction
1040 #endif
1041 
 1042   JavaFrameAnchor _anchor;                       // Encapsulation of current java frame and its state
1043 
1044   ThreadFunction _entry_point;
1045 
1046   JNIEnv        _jni_environment;
1047 
1048   // Deopt support
1049   DeoptResourceMark*  _deopt_mark;               // Holds special ResourceMark for deoptimization
1050 
1051   CompiledMethod*       _deopt_nmethod;         // CompiledMethod that is currently being deoptimized
1052   vframeArray*  _vframe_array_head;              // Holds the heap of the active vframeArrays
1053   vframeArray*  _vframe_array_last;              // Holds last vFrameArray we popped
1054   // Holds updates by JVMTI agents for compiled frames that cannot be performed immediately. They
1055   // will be carried out as soon as possible, which, in most cases, is just before deoptimization of
1056   // the frame, when control returns to it.
1057   JvmtiDeferredUpdates* _jvmti_deferred_updates;

1058 
1059   // Handshake value for fixing 6243940. We need a place for the i2c
1060   // adapter to store the callee Method*. This value is NEVER live
1061   // across a gc point so it does NOT have to be gc'd
1062   // The handshake is open ended since we can't be certain that it will
1063   // be NULLed. This is because we rarely ever see the race and end up
1064   // in handle_wrong_method which is the backend of the handshake. See
1065   // code in i2c adapters and handle_wrong_method.
1066 
1067   Method*       _callee_target;
1068 
1069   // Used to pass back results to the interpreter or generated code running Java code.
1070   oop           _vm_result;    // oop result is GC-preserved
1071   Metadata*     _vm_result_2;  // non-oop result
1072 
1073   // See ReduceInitialCardMarks: this holds the precise space interval of
1074   // the most recent slow path allocation for which compiled code has
1075   // elided card-marks for performance along the fast-path.
1076   MemRegion     _deferred_card_mark;
1077 


1348     _handshake.set_operation(this, op);
1349   }
1350 
1351   bool has_handshake() const {
1352     return _handshake.has_operation();
1353   }
1354 
1355   void handshake_process_by_self() {
1356     _handshake.process_by_self(this);
1357   }
1358 
1359   void handshake_process_by_vmthread() {
1360     _handshake.process_by_vmthread(this);
1361   }
1362 
1363   // Suspend/resume support for JavaThread
1364  private:
1365   inline void set_ext_suspended();
1366   inline void clear_ext_suspended();
1367 
1368   // Synchronize with another thread (most likely a JVMTI agent) that is deoptimizing objects of the
 1369   // current thread, i.e. reverting optimizations based on escape analysis.
1370   void wait_for_object_deoptimization();
1371 
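
wait_for_object_deoptimization() is only declared here; its body lives in thread.cpp and is not part of this hunk. Conceptually it has the shape "block while another thread still has an object-deoptimization request pending for this thread". A hedged standalone sketch of that shape, using std::condition_variable instead of SR_lock and HotSpot thread-state transitions (names illustrative, not the real implementation):

    #include <condition_variable>
    #include <mutex>

    struct ObjDeoptSync {
      std::mutex lock;
      std::condition_variable cv;
      bool obj_deopt_requested = false;    // stands in for the _ea_obj_deopt bit

      // Target thread: park until the requester has finished reallocating
      // and relocking our objects.
      void wait_for_object_deoptimization() {
        std::unique_lock<std::mutex> ml(lock);
        cv.wait(ml, [this] { return !obj_deopt_requested; });
      }

      // Requesting thread (e.g. a JVMTI agent) signals that it is done.
      void finish_object_deoptimization() {
        { std::lock_guard<std::mutex> lg(lock); obj_deopt_requested = false; }
        cv.notify_all();
      }
    };
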
1372  public:
1373   void java_suspend(); // higher-level suspension logic called by the public APIs
1374   void java_resume();  // higher-level resume logic called by the public APIs
1375   int  java_suspend_self(); // low-level self-suspension mechanics
1376 
1377  private:
1378   // mid-level wrapper around java_suspend_self to set up correct state and
1379   // check for a pending safepoint at the end
1380   void java_suspend_self_with_safepoint_check();
1381 
1382  public:
1383   void check_and_wait_while_suspended() {
1384     assert(JavaThread::current() == this, "sanity check");
1385 
1386     bool do_self_suspend;
1387     do {
1388       // were we externally suspended while we were waiting?
1389       do_self_suspend = handle_special_suspend_equivalent_condition();
1390       if (do_self_suspend) {
1391         // don't surprise the thread that suspended us by returning


1416   // We cannot allow wait_for_ext_suspend_completion() to run forever or
1417   // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
1418   // passed as the count and delay parameters. Experiments with specific
1419   // calls to wait_for_ext_suspend_completion() can be done by passing
1420   // other values in the code. Experiments with all calls can be done
1421   // via the appropriate -XX options.
1422   bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);
1423 
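
The comment above promises a bounded wait: the caller supplies a retry count and a per-retry delay (normally the SuspendRetryCount and SuspendRetryDelay flags). The calling convention amounts to a simple poll-with-backoff loop; a hedged sketch of that shape (standalone, hypothetical names, not the actual implementation in thread.cpp):

    #include <chrono>
    #include <thread>

    // Poll 'suspended' up to 'count' times, sleeping 'delay_ms' between
    // attempts; returns whether the suspension completed in time.
    template <typename Pred>
    bool wait_bounded(Pred suspended, int count, int delay_ms) {
      for (int i = 0; i < count; i++) {
        if (suspended()) {
          return true;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms));
      }
      return false;   // caller decides how to handle the timeout
    }
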
1424   // test for suspend - most (all?) of these should go away
1425   bool is_thread_fully_suspended(bool wait_for_suspend, uint32_t *bits);
1426 
1427   inline void set_external_suspend();
1428   inline void clear_external_suspend();
1429 
1430   bool is_external_suspend() const {
1431     return (_suspend_flags & _external_suspend) != 0;
1432   }
1433   // Whenever a thread transitions from native to vm/java it must suspend
1434   // if external|deopt suspend is present.
1435   bool is_suspend_after_native() const {
1436     return (_suspend_flags & (_external_suspend | _ea_obj_deopt JFR_ONLY(| _trace_flag))) != 0;
1437   }
1438 
1439   // external suspend request is completed
1440   bool is_ext_suspended() const {
1441     return (_suspend_flags & _ext_suspended) != 0;
1442   }
1443 
1444   bool is_external_suspend_with_lock() const {
1445     MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1446     return is_external_suspend();
1447   }
1448 
1449   // Special method to handle a pending external suspend request
1450   // when a suspend equivalent condition lifts.
1451   bool handle_special_suspend_equivalent_condition() {
1452     assert(is_suspend_equivalent(),
1453            "should only be called in a suspend equivalence condition");
1454     MutexLocker ml(SR_lock(), Mutex::_no_safepoint_check_flag);
1455     bool ret = is_external_suspend();
1456     if (!ret) {


1489   bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }
1490 
1491   void check_and_handle_async_exceptions(bool check_unsafe_error = true);
1492 
1493   // these next two are also used for self-suspension and async exception support
1494   void handle_special_runtime_exit_condition(bool check_asyncs = true);
1495 
1496   // Return true if JavaThread has an asynchronous condition or
1497   // if external suspension is requested.
1498   bool has_special_runtime_exit_condition() {
1499     // Because we don't use is_external_suspend_with_lock
1500     // it is possible that we won't see an asynchronous external suspend
1501     // request that has just gotten started, i.e., SR_lock grabbed but
1502     // _external_suspend field change either not made yet or not visible
1503     // yet. However, this is okay because the request is asynchronous and
1504     // we will see the new flag value the next time through. It's also
1505     // possible that the external suspend request is dropped after
 1506   // we have checked is_external_suspend(); if so, we will recheck its value
1507     // under SR_lock in java_suspend_self().
1508     return (_special_runtime_exit_condition != _no_async_condition) ||
1509             is_external_suspend() || is_trace_suspend() || is_ea_obj_deopt_suspend();
1510   }
1511 
1512   void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }
1513 
1514   inline void set_pending_async_exception(oop e);
1515 
1516   // Fast-locking support
1517   bool is_lock_owned(address adr) const;
1518 
1519   // Accessors for vframe array top
 1520   // The linked list of vframe arrays is sorted on sp. This means that when we
 1521   // unpack, the head must contain the vframe array to unpack.
1522   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
1523   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
1524 
1525   // Side structure for deferring update of java frame locals until deopt occurs
1526   JvmtiDeferredUpdates* deferred_updates() const { return _jvmti_deferred_updates; }
1527   void reset_deferred_updates()                  { _jvmti_deferred_updates = NULL; }
1528   void allocate_deferred_updates() {
1529     assert(_jvmti_deferred_updates == NULL, "already allocated");
1530     _jvmti_deferred_updates = new JvmtiDeferredUpdates();
1531   }
1532   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _jvmti_deferred_updates == NULL ? NULL : _jvmti_deferred_updates->deferred_locals(); }
1533 
 1534   // Relocking has to be deferred if the lock-owning thread is currently waiting on the monitor.
1535   int get_and_reset_relock_count_after_wait() {
1536     return deferred_updates() == NULL ? 0 : deferred_updates()->get_and_reset_relock_count_after_wait();
1537   }
1538   void inc_relock_count_after_wait() {
1539     if (deferred_updates() == NULL) {
1540       allocate_deferred_updates();
1541     }
1542     deferred_updates()->inc_relock_count_after_wait();
1543   }
1544 
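
The two relock helpers above capture a hand-off: when the deoptimizing thread needs to relock a monitor whose owner is sitting in Object.wait(), it cannot take the lock on that thread's behalf, so it only records how many acquisitions are owed, and the owner settles them itself once its wait returns. A hedged standalone sketch of the counting side of that hand-off (an ordinary recursive mutex standing in for the Java monitor; illustrative only):

    #include <atomic>
    #include <mutex>

    struct RelockDebt {
      std::recursive_mutex monitor;                  // stand-in for the Java monitor
      std::atomic<int> relock_count_after_wait{0};   // owed acquisitions, recorded by the deoptimizing thread

      // Deoptimizing thread: the owner is waiting, so just record one owed relock.
      void defer_relock() { relock_count_after_wait.fetch_add(1); }

      // Owner, after its wait returns: take the monitor as many extra times
      // as were deferred while it was waiting.
      void settle_deferred_relocks() {
        int n = relock_count_after_wait.exchange(0);
        for (int i = 0; i < n; i++) {
          monitor.lock();                            // matched by unlocks at later monitor exits
        }
      }
    };
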
1545   // These only really exist to make debugging deopt problems simpler
1546 
1547   void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
1548   vframeArray* vframe_array_last() const         { return _vframe_array_last;  }
1549 
1550   // The special resourceMark used during deoptimization
1551 
1552   void set_deopt_mark(DeoptResourceMark* value)  { _deopt_mark = value; }
1553   DeoptResourceMark* deopt_mark(void)            { return _deopt_mark; }
1554 
1555   void set_deopt_compiled_method(CompiledMethod* nm)  { _deopt_nmethod = nm; }
1556   CompiledMethod* deopt_compiled_method()        { return _deopt_nmethod; }
1557 
1558   Method*    callee_target() const               { return _callee_target; }
1559   void set_callee_target  (Method* x)          { _callee_target   = x; }
1560 
1561   // Oop results of vm runtime calls
1562   oop  vm_result() const                         { return _vm_result; }
1563   void set_vm_result  (oop x)                    { _vm_result   = x; }


2125 // Dedicated thread to sweep the code cache
2126 class CodeCacheSweeperThread : public JavaThread {
2127   CompiledMethod*       _scanned_compiled_method; // nmethod being scanned by the sweeper
2128  public:
2129   CodeCacheSweeperThread();
2130   // Track the nmethod currently being scanned by the sweeper
2131   void set_scanned_compiled_method(CompiledMethod* cm) {
2132     assert(_scanned_compiled_method == NULL || cm == NULL, "should reset to NULL before writing a new value");
2133     _scanned_compiled_method = cm;
2134   }
2135 
2136   // Hide sweeper thread from external view.
2137   bool is_hidden_from_external_view() const { return true; }
2138 
2139   bool is_Code_cache_sweeper_thread() const { return true; }
2140 
2141   // Prevent GC from unloading _scanned_compiled_method
2142   void oops_do(OopClosure* f, CodeBlobClosure* cf);
2143   void nmethods_do(CodeBlobClosure* cf);
2144 };
2145 
2146 #if defined(ASSERT) && COMPILER2_OR_JVMCI
2147 // See Deoptimization::deoptimize_objects_alot_loop()
2148 class DeoptimizeObjectsALotThread : public JavaThread {
2149  public:
2150   DeoptimizeObjectsALotThread();
2151 };
2152 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI
2153 
2154 // A thread used for Compilation.
2155 class CompilerThread : public JavaThread {
2156   friend class VMStructs;
2157  private:
2158   CompilerCounters* _counters;
2159 
2160   ciEnv*                _env;
2161   CompileLog*           _log;
2162   CompileTask* volatile _task;  // print_threads_compiling can read this concurrently.
2163   CompileQueue*         _queue;
2164   BufferBlob*           _buffer_blob;
2165 
2166   AbstractCompiler*     _compiler;
2167   TimeStamp             _idle_time;
2168 
2169  public:
2170 
2171   static CompilerThread* current();
2172 

