src/share/vm/runtime/thread.hpp

@@ -86,10 +86,13 @@
 template <class T, MEMFLAGS F> class ChunkedList;
 typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
 
 DEBUG_ONLY(class ResourceMark;)
 
+class TraceBuffer;
+class CachedTraceStack;
+
 class WorkerThread;
 
 // Class hierarchy
 // - Thread
 //   - NamedThread

@@ -260,10 +263,18 @@
                                                 // the Java heap
 
   // Thread-local buffer used by MetadataOnStackMark.
   MetadataOnStackBuffer* _metadata_on_stack_buffer;
 
+  TraceBuffer* _trace_buffer;
+  DEBUG_ONLY(bool _trace_active;)
+  intptr_t     _park_last_global_seq;
+  int          _park_priority;
+  int          _nesting_level;
+  address      _memento_original_return_address;
+  const CachedTraceStack* _memento_stack_trace;
+
   TRACE_DATA _trace_data;                       // Thread-local data for tracing
 
   ThreadExt _ext;
 
   int   _vm_operation_started_count;            // VM_Operation support

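The new fields are presumably zero-initialized in the Thread constructor in thread.cpp, which is not part of this file. A minimal sketch of what that companion change is assumed to look like, reusing only the field names introduced above:

  // Assumed companion initialization in Thread::Thread() (thread.cpp);
  // hypothetical sketch, not shown in this webrev:
  _trace_buffer = NULL;
  DEBUG_ONLY(_trace_active = false;)
  _park_last_global_seq = 0;
  _park_priority = 0;
  _nesting_level = 0;
  _memento_original_return_address = NULL;
  _memento_stack_trace = NULL;
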
@@ -310,10 +321,11 @@
 
   // Testers
   virtual bool is_VM_thread()       const            { return false; }
   virtual bool is_Java_thread()     const            { return false; }
   virtual bool is_Compiler_thread() const            { return false; }
+  virtual bool is_TraceReader_thread() const         { return false; }
   virtual bool is_hidden_from_external_view() const  { return false; }
   virtual bool is_jvmti_agent_thread() const         { return false; }
   // True iff the thread can perform GC operations at a safepoint.
   // Generally will be true only of VM thread and parallel GC WorkGang
   // threads.

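Like the other is_*_thread() testers above, is_TraceReader_thread() returns false in the base class and would be overridden to return true in the matching thread subclass. A hypothetical sketch of such an override, following the existing tester pattern (class name and base class are assumptions; the real definition is not in this file):

  // Hypothetical subclass; only illustrates the virtual-tester pattern.
  class TraceReaderThread : public NamedThread {
   public:
    virtual bool is_TraceReader_thread() const { return true; }
  };
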
@@ -441,10 +453,25 @@
   jlong allocated_bytes()               { return _allocated_bytes; }
   void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
   void incr_allocated_bytes(jlong size) { _allocated_bytes += size; }
   inline jlong cooked_allocated_bytes();
 
+  TraceBuffer* trace_buffer()            { return _trace_buffer; }
+  void set_trace_buffer(TraceBuffer* b)  { _trace_buffer = b;    }
+  DEBUG_ONLY(bool trace_active()         { return _trace_active; })
+  DEBUG_ONLY(void toggle_trace_active()  { _trace_active = !_trace_active; })
+  void set_park_last_global_seq(intptr_t seq) { _park_last_global_seq = seq; }
+  static ByteSize park_last_global_seq_offset() { return byte_offset_of(Thread, _park_last_global_seq); }
+  int park_priority()                    { return _park_priority; }
+  static ByteSize park_priority_offset() { return byte_offset_of(Thread, _park_priority); }
+  int nesting_level()                    { return _nesting_level; }
+  static ByteSize nesting_level_offset() { return byte_offset_of(Thread, _nesting_level); }
+  address& memento_original_return_address()               { return _memento_original_return_address; }
+  static ByteSize memento_original_return_address_offset() { return byte_offset_of(Thread, _memento_original_return_address); }
+  const CachedTraceStack* memento_stack_trace()            { return _memento_stack_trace; }
+  void set_memento_stack_trace(const CachedTraceStack* ts) { _memento_stack_trace = ts;   }
+
   TRACE_DATA* trace_data()              { return &_trace_data; }
 
   const ThreadExt& ext() const          { return _ext; }
   ThreadExt& ext()                      { return _ext; }
 