
src/hotspot/share/utilities/globalCounter.inline.hpp

--- old/src/hotspot/share/utilities/globalCounter.inline.hpp
  23  */
  24 
  25 #ifndef SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP
  26 #define SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP
  27 
  28 #include "runtime/atomic.hpp"
  29 #include "runtime/orderAccess.hpp"
  30 #include "runtime/thread.inline.hpp"
  31 #include "utilities/globalCounter.hpp"
  32 
  33 inline GlobalCounter::CSContext
  34 GlobalCounter::critical_section_begin(Thread *thread) {
  35   assert(thread == Thread::current(), "must be current thread");
  36   uintx old_cnt = Atomic::load(thread->get_rcu_counter());
  37   // Retain the old counter value if already active, e.g. nested.
  38   // Otherwise, set the counter to the current version + active bit.
  39   uintx new_cnt = old_cnt;
  40   if ((new_cnt & COUNTER_ACTIVE) == 0) {
  41     new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE;
  42   }
  43   OrderAccess::release_store_fence(thread->get_rcu_counter(), new_cnt);
  44   return static_cast<CSContext>(old_cnt);
  45 }
  46 
  47 inline void
  48 GlobalCounter::critical_section_end(Thread *thread, CSContext context) {
  49   assert(thread == Thread::current(), "must be current thread");
  50   assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
  51   // Restore the counter value from before the associated begin.
  52   OrderAccess::release_store(thread->get_rcu_counter(),
  53                              static_cast<uintx>(context));
  54 }
  55 
  56 class GlobalCounter::CriticalSection {
  57  private:
  58   Thread* _thread;
  59   CSContext _context;
  60  public:
  61   inline CriticalSection(Thread* thread) :
  62     _thread(thread),
  63     _context(GlobalCounter::critical_section_begin(_thread))
  64   {}
  65 
  66   inline  ~CriticalSection() {
  67     GlobalCounter::critical_section_end(_thread, _context);
  68   }
  69 };
  70 
  71 #endif // SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP

+++ new/src/hotspot/share/utilities/globalCounter.inline.hpp
  23  */
  24 
  25 #ifndef SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP
  26 #define SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP
  27 
  28 #include "runtime/atomic.hpp"
  29 #include "runtime/orderAccess.hpp"
  30 #include "runtime/thread.inline.hpp"
  31 #include "utilities/globalCounter.hpp"
  32 
  33 inline GlobalCounter::CSContext
  34 GlobalCounter::critical_section_begin(Thread *thread) {
  35   assert(thread == Thread::current(), "must be current thread");
  36   uintx old_cnt = Atomic::load(thread->get_rcu_counter());
  37   // Retain the old counter value if already active, e.g. nested.
  38   // Otherwise, set the counter to the current version + active bit.
  39   uintx new_cnt = old_cnt;
  40   if ((new_cnt & COUNTER_ACTIVE) == 0) {
  41     new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE;
  42   }
  43   Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt);
  44   return static_cast<CSContext>(old_cnt);
  45 }
  46 
  47 inline void
  48 GlobalCounter::critical_section_end(Thread *thread, CSContext context) {
  49   assert(thread == Thread::current(), "must be current thread");
  50   assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
  51   // Restore the counter value from before the associated begin.
  52   Atomic::release_store(thread->get_rcu_counter(),
  53                         static_cast<uintx>(context));
  54 }
  55 
  56 class GlobalCounter::CriticalSection {
  57  private:
  58   Thread* _thread;
  59   CSContext _context;
  60  public:
  61   inline CriticalSection(Thread* thread) :
  62     _thread(thread),
  63     _context(GlobalCounter::critical_section_begin(_thread))
  64   {}
  65 
  66   inline  ~CriticalSection() {
  67     GlobalCounter::critical_section_end(_thread, _context);
  68   }
  69 };
  70 
  71 #endif // SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP
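
For context, a rough usage sketch (not part of this change): readers enter a GlobalCounter critical section, typically through the RAII CriticalSection helper above, while a writer publishes new data and then calls GlobalCounter::write_synchronize() to wait until no reader can still hold a reference to the old data. The Node type, the _head variable, and the two functions below are hypothetical names used only for illustration; the GlobalCounter and Atomic calls are the HotSpot APIs shown in this file and its header, but this is an assumed sketch, not code from the patch.

#include "runtime/atomic.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalCounter.inline.hpp"

// Hypothetical example data guarded by GlobalCounter (not from the patch).
struct Node { int value; };
static Node* volatile _head = NULL;

// Reader side: the scoped CriticalSection marks this thread as active, so a
// concurrent write_synchronize() waits for this scope to exit before the
// writer frees the old node. Critical sections may nest, because
// critical_section_begin() returns the previous per-thread counter value and
// critical_section_end() simply restores it.
static int read_head_value() {
  GlobalCounter::CriticalSection cs(Thread::current());
  Node* n = Atomic::load_acquire(&_head);
  return n != NULL ? n->value : 0;
}

// Writer side (assuming a single writer): publish the new node, then wait for
// all readers that might still see the old one before deleting it.
static void replace_head(Node* new_node) {
  Node* old = Atomic::load(&_head);
  Atomic::release_store(&_head, new_node);
  GlobalCounter::write_synchronize();
  delete old;
}

The release_store_fence() in critical_section_begin() is what lets write_synchronize() trust the per-thread counters: the reader's active bit is guaranteed to be visible to the writer before any load the reader performs inside the section.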