1 /*
   2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP
  26 #define SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP
  27 
  28 #include "runtime/atomic.hpp"
  29 #include "runtime/orderAccess.hpp"
  30 #include "runtime/thread.inline.hpp"
  31 #include "utilities/globalCounter.hpp"
  32 
// Enter a global-counter critical section on behalf of 'thread' (which must
// be the current thread). Returns the thread's previous rcu-counter value;
// the matching critical_section_end() call restores it. Because a nested
// begin returns a value that still has COUNTER_ACTIVE set, nesting works:
// only the outermost end clears the active bit.
inline GlobalCounter::CSContext
GlobalCounter::critical_section_begin(Thread *thread) {
  assert(thread == Thread::current(), "must be current thread");
  uintx old_cnt = Atomic::load(thread->get_rcu_counter());
  // Retain the old counter value if already active, e.g. nested.
  // Otherwise, set the counter to the current version + active bit.
  uintx new_cnt = old_cnt;
  if ((new_cnt & COUNTER_ACTIVE) == 0) {
    new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE;
  }
  // Store-with-trailing-fence so the active state is published before any
  // subsequent loads inside the critical section can execute.
  // NOTE(review): this is presumably paired with a synchronize operation
  // that scans per-thread counters — confirm against globalCounter.cpp.
  Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt);
  return static_cast<CSContext>(old_cnt);
}
  46 
// Leave a critical section by restoring the rcu-counter value saved by the
// matching critical_section_begin(). For a nested section the saved value
// still has COUNTER_ACTIVE set, so the thread remains marked active until
// the outermost end executes.
inline void
GlobalCounter::critical_section_end(Thread *thread, CSContext context) {
  assert(thread == Thread::current(), "must be current thread");
  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
  // Restore the counter value from before the associated begin.
  // Release ordering keeps the section's memory accesses from being
  // reordered after this store.
  Atomic::release_store(thread->get_rcu_counter(),
                        static_cast<uintx>(context));
}
  55 
  56 class GlobalCounter::CriticalSection {
  57  private:
  58   Thread* _thread;
  59   CSContext _context;
  60  public:
  61   inline CriticalSection(Thread* thread) :
  62     _thread(thread),
  63     _context(GlobalCounter::critical_section_begin(_thread))
  64   {}
  65 
  66   inline  ~CriticalSection() {
  67     GlobalCounter::critical_section_end(_thread, _context);
  68   }
  69 };
  70 
  71 #endif // SHARE_UTILITIES_GLOBALCOUNTER_INLINE_HPP