src/share/vm/runtime/thread.hpp

Print this page




       // Platform-specific OS-level thread object backing this Thread.
 419   OSThread* osthread() const                     { return _osthread;   }
 420   void set_osthread(OSThread* thread)            { _osthread = thread; }
 421 
 422   // JNI handle support
       // _active_handles is the block JNI handles are currently allocated from;
       // _free_handle_block is a spare block (NOTE(review): presumably cached to
       // avoid re-allocating blocks -- confirm against jniHandles.cpp).
 423   JNIHandleBlock* active_handles() const         { return _active_handles; }
 424   void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
 425   JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
 426   void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
 427 
 428   // Internal handle support
       // Arena used for the VM-internal Handle mechanism (metadata handles below
       // are tracked separately in a growable array).
 429   HandleArea* handle_area() const                { return _handle_area; }
 430   void set_handle_area(HandleArea* area)         { _handle_area = area; }
 431 
 432   GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
 433   void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
 434 
 435   // Thread-Local Allocation Buffer (TLAB) support
 436   ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
       // Initializes this thread's TLAB; deliberately a no-op when TLABs are
       // disabled (-XX:-UseTLAB).
 437   void initialize_tlab() {
 438     if (UseTLAB) {
 439       tlab().initialize();
 440     }
 441   }
 442 
       // Raw per-thread allocated-byte counter. The code in
       // cooked_allocated_bytes() adds the live-TLAB portion on top of this,
       // which implies the raw counter does NOT yet include bytes sitting in
       // the current TLAB.
 443   jlong allocated_bytes()               { return _allocated_bytes; }
 444   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
 445   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
       // Allocated-byte count "cooked" to include bytes already used inside the
       // current TLAB. The load_acquire suggests this is intended to be read
       // concurrently by other threads (NOTE(review): confirm callers).
 446   jlong cooked_allocated_bytes() {
 447     jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
 448     if (UseTLAB) {
 449       size_t used_bytes = tlab().used_bytes();
       // Signed reinterpretation: only add used_bytes when it is a plausible
       // positive value; a huge unsigned value goes negative here and is
       // skipped (NOTE(review): presumably guards a racy/uninitialized TLAB
       // snapshot -- confirm against ThreadLocalAllocBuffer::used_bytes()).
 450       if ((ssize_t)used_bytes > 0) {
 451         // More-or-less valid tlab.  The load_acquire above should ensure
 452         // that the result of the add is <= the instantaneous value
 453         return allocated_bytes + used_bytes;
 454       }
 455     }
 456     return allocated_bytes;
 457   }
 458 
       // Pointer to this thread's TRACE_DATA area (event tracing support).
 459   TRACE_DATA* trace_data()              { return &_trace_data; }


 933 #endif // GRAAL
       // Current state of this thread's stack guard pages (see StackGuardState).
 934   StackGuardState        _stack_guard_state;
 935 
 936   nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
 937 
 938   // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
 939   // used to temp. parsing values into and out of the runtime system during exception handling for compiled
 940   // code)
 941   volatile oop     _exception_oop;               // Exception thrown in compiled code
 942   volatile address _exception_pc;                // PC where exception happened
 943   volatile address _exception_handler_pc;        // PC for handler of exception
 944   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 945 
 946 #ifdef GRAAL
 947   // Record the method and bci from a gpu kernel exception so
 948   // it can be added into the exception stack trace
 949   jint    _gpu_exception_bci;
 950   Method* _gpu_exception_method;
 951   // Record the hsailDeoptimization info so gc oops_do processing can find it
 952   void*   _gpu_hsail_deopt_info;


 953 #endif
 954 
 955  public:
 956 #ifdef GRAAL
       // Trivial get/set accessors for the GRAAL-only gpu-exception and HSAIL
       // deoptimization bookkeeping fields declared in the private section above.
 957   void set_gpu_exception_bci(jint bci)           { _gpu_exception_bci = bci; } 
 958   jint get_gpu_exception_bci()                   { return _gpu_exception_bci; }
 959   void set_gpu_exception_method(Method* method)  { _gpu_exception_method = method; }
 960   Method* get_gpu_exception_method()             { return _gpu_exception_method; }
 961   void set_gpu_hsail_deopt_info(void * deoptInfo) { _gpu_hsail_deopt_info = deoptInfo; }
 962   void* get_gpu_hsail_deopt_info()               { return _gpu_hsail_deopt_info; }






 963 #endif
 964   
 965  private:  
 966   // support for JNI critical regions
 967   jint    _jni_active_critical;                  // count of entries into JNI critical region
 968 
 969   // For deadlock detection.
 970   int _depth_first_number;
 971 
 972   // JVMTI PopFrame support
 973   // This is set to popframe_pending to signal that top Java frame should be popped immediately
 974   int _popframe_condition;
 975 
 976 #ifndef PRODUCT
       // Index into the jump-trace ring buffer declared below (debug builds only).
 977   int _jmp_ring_index;
 978   struct {
 979       // We use intptr_t instead of address so debugger doesn't try and display strings
 980       intptr_t _target;
 981       intptr_t _instruction;
 982       const char*  _file;




       // Platform-specific OS-level thread object backing this Thread.
 419   OSThread* osthread() const                     { return _osthread;   }
 420   void set_osthread(OSThread* thread)            { _osthread = thread; }
 421 
 422   // JNI handle support
       // _active_handles is the block JNI handles are currently allocated from;
       // _free_handle_block is a spare block (NOTE(review): presumably cached to
       // avoid re-allocating blocks -- confirm against jniHandles.cpp).
 423   JNIHandleBlock* active_handles() const         { return _active_handles; }
 424   void set_active_handles(JNIHandleBlock* block) { _active_handles = block; }
 425   JNIHandleBlock* free_handle_block() const      { return _free_handle_block; }
 426   void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; }
 427 
 428   // Internal handle support
       // Arena used for the VM-internal Handle mechanism (metadata handles below
       // are tracked separately in a growable array).
 429   HandleArea* handle_area() const                { return _handle_area; }
 430   void set_handle_area(HandleArea* area)         { _handle_area = area; }
 431 
 432   GrowableArray<Metadata*>* metadata_handles() const          { return _metadata_handles; }
 433   void set_metadata_handles(GrowableArray<Metadata*>* handles){ _metadata_handles = handles; }
 434 
 435   // Thread-Local Allocation Buffer (TLAB) support
 436   ThreadLocalAllocBuffer& tlab()                 { return _tlab; }
       // Initializes this thread's TLAB; a no-op when TLABs are disabled
       // (-XX:-UseTLAB). Unlike the older revision shown earlier in this
       // capture, initialize() is passed the owning thread here.
 437   void initialize_tlab() {
 438     if (UseTLAB) {
 439       tlab().initialize(this);
 440     }
 441   }
 442 
       // Raw per-thread allocated-byte counter. The code in
       // cooked_allocated_bytes() adds the live-TLAB portion on top of this,
       // which implies the raw counter does NOT yet include bytes sitting in
       // the current TLAB.
 443   jlong allocated_bytes()               { return _allocated_bytes; }
 444   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
 445   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
       // Allocated-byte count "cooked" to include bytes already used inside the
       // current TLAB. The load_acquire suggests this is intended to be read
       // concurrently by other threads (NOTE(review): confirm callers).
 446   jlong cooked_allocated_bytes() {
 447     jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
 448     if (UseTLAB) {
 449       size_t used_bytes = tlab().used_bytes();
       // Signed reinterpretation: only add used_bytes when it is a plausible
       // positive value; a huge unsigned value goes negative here and is
       // skipped (NOTE(review): presumably guards a racy/uninitialized TLAB
       // snapshot -- confirm against ThreadLocalAllocBuffer::used_bytes()).
 450       if ((ssize_t)used_bytes > 0) {
 451         // More-or-less valid tlab.  The load_acquire above should ensure
 452         // that the result of the add is <= the instantaneous value
 453         return allocated_bytes + used_bytes;
 454       }
 455     }
 456     return allocated_bytes;
 457   }
 458 
       // Pointer to this thread's TRACE_DATA area (event tracing support).
 459   TRACE_DATA* trace_data()              { return &_trace_data; }


 933 #endif // GRAAL
       // Current state of this thread's stack guard pages (see StackGuardState).
 934   StackGuardState        _stack_guard_state;
 935 
 936   nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
 937 
 938   // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
 939   // used to temp. parsing values into and out of the runtime system during exception handling for compiled
 940   // code)
 941   volatile oop     _exception_oop;               // Exception thrown in compiled code
 942   volatile address _exception_pc;                // PC where exception happened
 943   volatile address _exception_handler_pc;        // PC for handler of exception
 944   volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
 945 
 946 #ifdef GRAAL
 947   // Record the method and bci from a gpu kernel exception so
 948   // it can be added into the exception stack trace
 949   jint    _gpu_exception_bci;
 950   Method* _gpu_exception_method;
 951   // Record the hsailDeoptimization info so gc oops_do processing can find it
 952   void*   _gpu_hsail_deopt_info;
       // Count and array of HSAIL TLABs; managed by initialize_gpu_hsail_tlabs /
       // delete_gpu_hsail_tlabs in the public section (bodies defined elsewhere).
 953   jint    _gpu_hsail_tlabs_count;
 954   ThreadLocalAllocBuffer** _gpu_hsail_tlabs;
 955 #endif
 956 
 957  public:
 958 #ifdef GRAAL
       // Trivial get/set accessors for the GRAAL-only gpu-exception and HSAIL
       // deoptimization bookkeeping fields declared in the private section above.
 959   void set_gpu_exception_bci(jint bci)           { _gpu_exception_bci = bci; } 
 960   jint get_gpu_exception_bci()                   { return _gpu_exception_bci; }
 961   void set_gpu_exception_method(Method* method)  { _gpu_exception_method = method; }
 962   Method* get_gpu_exception_method()             { return _gpu_exception_method; }
 963   void set_gpu_hsail_deopt_info(void * deoptInfo) { _gpu_hsail_deopt_info = deoptInfo; }
 964   void* get_gpu_hsail_deopt_info()               { return _gpu_hsail_deopt_info; }
 965   jint  get_gpu_hsail_tlabs_count()              { return _gpu_hsail_tlabs_count; }
 966 
       // Lifecycle/access of the _gpu_hsail_tlabs array; declarations only --
       // the bodies are defined out of line (not visible in this capture).
 967   void  initialize_gpu_hsail_tlabs(jint count);
 968   ThreadLocalAllocBuffer* get_gpu_hsail_tlab_at(jint idx);
 969   void gpu_hsail_tlabs_make_parsable(bool retire);
 970   void  delete_gpu_hsail_tlabs();
 971 #endif
 972   
 973  private:  
 974   // support for JNI critical regions
 975   jint    _jni_active_critical;                  // count of entries into JNI critical region
 976 
 977   // For deadlock detection.
 978   int _depth_first_number;
 979 
 980   // JVMTI PopFrame support
 981   // This is set to popframe_pending to signal that top Java frame should be popped immediately
 982   int _popframe_condition;
 983 
 984 #ifndef PRODUCT
       // Index into the jump-trace ring buffer declared below (debug builds only).
 985   int _jmp_ring_index;
 986   struct {
 987       // We use intptr_t instead of address so debugger doesn't try and display strings
 988       intptr_t _target;
 989       intptr_t _instruction;
 990       const char*  _file;