
src/hotspot/share/runtime/thread.inline.hpp

rev 60137 : 8227745: Enable Escape Analysis for Better Performance in the Presence of JVMTI Agents
Reviewed-by: mdoerr, goetz
rev 60138 : 8227745: delta webrev.5 -> webrev.6


Old (webrev.5):

}

inline void Thread::set_has_async_exception() {
  set_suspend_flag(_has_async_exception);
}
inline void Thread::clear_has_async_exception() {
  clear_suspend_flag(_has_async_exception);
}
inline void Thread::set_critical_native_unlock() {
  set_suspend_flag(_critical_native_unlock);
}
inline void Thread::clear_critical_native_unlock() {
  clear_suspend_flag(_critical_native_unlock);
}
inline void Thread::set_trace_flag() {
  set_suspend_flag(_trace_flag);
}
inline void Thread::clear_trace_flag() {
  clear_suspend_flag(_trace_flag);
}
inline void Thread::set_ea_obj_deopt_flag() {
  set_suspend_flag(_ea_obj_deopt);
}
inline void Thread::clear_ea_obj_deopt_flag() {
  clear_suspend_flag(_ea_obj_deopt);
}

inline jlong Thread::cooked_allocated_bytes() {
  jlong allocated_bytes = Atomic::load_acquire(&_allocated_bytes);
  if (UseTLAB) {
    size_t used_bytes = tlab().used_bytes();
    if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
      // Comparing used_bytes with the maximum allowed size will ensure
      // that we don't add the used bytes from a semi-initialized TLAB
      // and end up with incorrect values. There is still a race between
      // incrementing _allocated_bytes and clearing the TLAB, which might
      // cause double counting in rare cases.
      return allocated_bytes + used_bytes;
    }
  }
  return allocated_bytes;
}

inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
  return (ThreadsList*)Atomic::cmpxchg(&_threads_hazard_ptr, compare_value, exchange_value);
}

New (webrev.6):

}

inline void Thread::set_has_async_exception() {
  set_suspend_flag(_has_async_exception);
}
inline void Thread::clear_has_async_exception() {
  clear_suspend_flag(_has_async_exception);
}
inline void Thread::set_critical_native_unlock() {
  set_suspend_flag(_critical_native_unlock);
}
inline void Thread::clear_critical_native_unlock() {
  clear_suspend_flag(_critical_native_unlock);
}
inline void Thread::set_trace_flag() {
  set_suspend_flag(_trace_flag);
}
inline void Thread::clear_trace_flag() {
  clear_suspend_flag(_trace_flag);
}
inline void Thread::set_obj_deopt_flag() {
  set_suspend_flag(_obj_deopt);
}
inline void Thread::clear_obj_deopt_flag() {
  clear_suspend_flag(_obj_deopt);
}

inline jlong Thread::cooked_allocated_bytes() {
  jlong allocated_bytes = Atomic::load_acquire(&_allocated_bytes);
  if (UseTLAB) {
    size_t used_bytes = tlab().used_bytes();
    if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
      // Comparing used_bytes with the maximum allowed size will ensure
      // that we don't add the used bytes from a semi-initialized TLAB
      // and end up with incorrect values. There is still a race between
      // incrementing _allocated_bytes and clearing the TLAB, which might
      // cause double counting in rare cases.
      return allocated_bytes + used_bytes;
    }
  }
  return allocated_bytes;
}

inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
  return (ThreadsList*)Atomic::cmpxchg(&_threads_hazard_ptr, compare_value, exchange_value);
}
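
All of the set_*/clear_* accessors above, including the renamed set_obj_deopt_flag()/clear_obj_deopt_flag(), funnel into set_suspend_flag()/clear_suspend_flag(), which must update the shared _suspend_flags word with an atomic read-modify-write so that concurrent setters of different bits cannot overwrite each other's updates. A minimal stand-alone sketch of that pattern, using std::atomic rather than HotSpot's Atomic class (the names and the bit value are illustrative, not HotSpot's):

#include <atomic>
#include <cstdint>

// Illustrative stand-ins for _suspend_flags and one of its flag bits.
std::atomic<uint32_t> suspend_flags{0};
constexpr uint32_t OBJ_DEOPT_BIT = 1u << 7;  // hypothetical bit position

// Atomically OR a bit into the word without losing concurrent updates
// to other bits; compare_exchange_weak refreshes 'old' on failure.
void set_flag(uint32_t f) {
  uint32_t old = suspend_flags.load(std::memory_order_relaxed);
  while (!suspend_flags.compare_exchange_weak(old, old | f)) { }
}

// Atomically AND-NOT the bit out, same loop shape.
void clear_flag(uint32_t f) {
  uint32_t old = suspend_flags.load(std::memory_order_relaxed);
  while (!suspend_flags.compare_exchange_weak(old, old & ~f)) { }
}

HotSpot's own implementation in thread.hpp of this vintage performs the same OR/AND-NOT inside a loop around Atomic::cmpxchg.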

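The bounds check in cooked_allocated_bytes() is worth unpacking: a reader can observe the TLAB mid-(re)initialization, so used_bytes() may return an arbitrarily large garbage value, and anything above the largest legal TLAB size can therefore be discarded as a misread. As the comment notes, a narrow window remains where a retired TLAB's bytes have been folded into _allocated_bytes but the TLAB is not yet cleared, so the sum can double count. A simplified stand-alone model of that accounting (all names hypothetical, not HotSpot's):

#include <atomic>
#include <cstddef>
#include <cstdint>

constexpr size_t kMaxTlabBytes = 4u * 1024 * 1024;  // stand-in for max_size_in_bytes()

struct Tlab {
  // 'start' and 'top' may be observed torn while the TLAB is being
  // re-initialized, so the difference below can be garbage.
  size_t start = 0;
  size_t top = 0;
  size_t used_bytes() const { return top - start; }
};

std::atomic<int64_t> allocated_bytes{0};  // bytes from already-retired TLABs

int64_t cooked_allocated_bytes(const Tlab& tlab) {
  int64_t cooked = allocated_bytes.load(std::memory_order_acquire);
  size_t used = tlab.used_bytes();
  if (used <= kMaxTlabBytes) {   // discard impossible (torn) readings
    cooked += (int64_t)used;     // may still double count during TLAB retirement
  }
  return cooked;
}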

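One subtlety in cmpxchg_threads_hazard_ptr(): this era's Atomic::cmpxchg takes (destination, compare_value, exchange_value) and returns the value it found at the destination, which is why the wrapper swaps its own (exchange_value, compare_value) parameter order when forwarding. A hypothetical caller publishing a hazard pointer would check the returned witness:

// Hypothetical use: try to swing the hazard pointer from nullptr to 'list'.
// Atomic::cmpxchg returns the previous value, so publication succeeded
// exactly when the witness equals the expected compare value.
ThreadsList* witness = thread->cmpxchg_threads_hazard_ptr(list, nullptr);
if (witness == nullptr) {
  // hazard pointer published; 'list' is now protected from being freed
} else {
  // lost the race; 'witness' is whatever another path installed
}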