
src/hotspot/share/utilities/globalCounter.cpp

--- old/src/hotspot/share/utilities/globalCounter.cpp

#include "precompiled.hpp"
#include "utilities/globalCounter.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/spinYield.hpp"

GlobalCounter::PaddedCounter GlobalCounter::_global_counter;

class GlobalCounter::CounterThreadCheck : public ThreadClosure {
 private:
  uintx _gbl_cnt;
 public:
  CounterThreadCheck(uintx gbl_cnt) : _gbl_cnt(gbl_cnt) {}
  void do_thread(Thread* thread) {
    SpinYield yield;
    // Loop until this thread has exited the critical read section.
    while (true) {
      uintx cnt = OrderAccess::load_acquire(thread->get_rcu_counter());
      // Check whether the thread's counter is active and, if so, whether it
      // belongs to a pre-existing reader (one this grace period must wait
      // for). A pre-existing reader has a lower counter than the global
      // counter version for this generation; the unsigned subtraction makes
      // the comparison safe across wrap-around. If the counter is larger than
      // the global counter version, this is a new reader and we can continue.
      if (((cnt & COUNTER_ACTIVE) != 0) && (cnt - _gbl_cnt) > (max_uintx / 2)) {
        yield.wait();
      } else {
        break;
      }
    }
  }
};

void GlobalCounter::write_synchronize() {
  assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critical section");
  // Atomic::add must provide a fence since we have a storeload dependency.
  uintx gbl_cnt = Atomic::add(COUNTER_INCREMENT, &_global_counter._counter);

  // Do all RCU threads.

+++ new/src/hotspot/share/utilities/globalCounter.cpp

#include "precompiled.hpp"
#include "utilities/globalCounter.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/spinYield.hpp"

GlobalCounter::PaddedCounter GlobalCounter::_global_counter;

class GlobalCounter::CounterThreadCheck : public ThreadClosure {
 private:
  uintx _gbl_cnt;
 public:
  CounterThreadCheck(uintx gbl_cnt) : _gbl_cnt(gbl_cnt) {}
  void do_thread(Thread* thread) {
    SpinYield yield;
    // Loop until this thread has exited the critical read section.
    while (true) {
      uintx cnt = Atomic::load_acquire(thread->get_rcu_counter());
      // Check whether the thread's counter is active and, if so, whether it
      // belongs to a pre-existing reader (one this grace period must wait
      // for). A pre-existing reader has a lower counter than the global
      // counter version for this generation; the unsigned subtraction makes
      // the comparison safe across wrap-around. If the counter is larger than
      // the global counter version, this is a new reader and we can continue.
      if (((cnt & COUNTER_ACTIVE) != 0) && (cnt - _gbl_cnt) > (max_uintx / 2)) {
        yield.wait();
      } else {
        break;
      }
    }
  }
};

void GlobalCounter::write_synchronize() {
  assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critical section");
  // Atomic::add must provide a fence since we have a storeload dependency.
  uintx gbl_cnt = Atomic::add(COUNTER_INCREMENT, &_global_counter._counter);

  // Do all RCU threads.
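
The (cnt - _gbl_cnt) > (max_uintx / 2) test deserves a closer look: because the
subtraction is unsigned, a reader counter that is modularly behind the global
counter wraps around to a huge difference, so pre-existing readers are
classified correctly even when the global counter itself overflows. Below is a
minimal standalone sketch of that arithmetic (not JDK code; the values
COUNTER_ACTIVE = 1 and COUNTER_INCREMENT = 2 are assumptions mirroring
globalCounter.hpp, and uintptr_t stands in for HotSpot's uintx):

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  typedef uintptr_t uintx;                        // stand-in for HotSpot's uintx
  const uintx max_uintx = std::numeric_limits<uintx>::max();
  const uintx COUNTER_ACTIVE = 1;                 // assumed low tag bit
  const uintx COUNTER_INCREMENT = 2;              // assumed version step, keeps versions even

  uintx gbl_cnt = 100 + COUNTER_INCREMENT;        // writer just bumped 100 -> 102

  // Pre-existing reader: snapshot of the old version with the tag bit set.
  uintx pre = 100 | COUNTER_ACTIVE;               // 101
  assert((pre - gbl_cnt) > (max_uintx / 2));      // unsigned wrap: reader is "behind"

  // New reader: entered after the bump; the difference stays small.
  uintx post = (100 + COUNTER_INCREMENT) | COUNTER_ACTIVE;  // 103
  assert((post - gbl_cnt) <= (max_uintx / 2));

  // The classification also survives overflow of the global counter itself.
  uintx old_reader  = (max_uintx - 3) | COUNTER_ACTIVE;         // reader at version max-3
  uintx wrapped_gbl = (max_uintx - 3) + 2 * COUNTER_INCREMENT;  // wraps past zero
  assert((old_reader - wrapped_gbl) > (max_uintx / 2));
  return 0;
}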
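
For context, this is how the counter is meant to be used: a reader brackets its
access with a critical section (which publishes its counter snapshot with the
active bit set), and a writer calls write_synchronize() to wait out every
pre-existing reader before reclaiming memory. Here is a hedged usage sketch,
assuming the RAII GlobalCounter::CriticalSection from the companion
globalCounter.inline.hpp and a single writer at a time; Table, _table,
read_table, and replace_table are hypothetical names, not JDK code:

#include "utilities/globalCounter.inline.hpp"
#include "runtime/atomic.hpp"

struct Table { /* hypothetical shared, heap-allocated structure */ };
static Table* volatile _table = NULL;   // hypothetical global, set elsewhere

void read_table(Thread* self) {
  // Marks self as an active reader until the destructor runs.
  GlobalCounter::CriticalSection cs(self);
  Table* t = Atomic::load_acquire(&_table);
  // ... read from *t; write_synchronize() cannot return while this
  // critical section is open, so *t cannot be freed under us ...
}

void replace_table(Table* new_table) {  // assumed: callers serialize writers
  Table* old_table = _table;
  Atomic::release_store(&_table, new_table);
  // Wait for all pre-existing readers to leave their critical sections.
  GlobalCounter::write_synchronize();
  delete old_table;                     // no reader can still reference it
}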