1 /*
  2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "code/codeCache.hpp"
 27 #include "code/nmethod.hpp"
 28 #include "code/nmethodBarrier.hpp"
 29 #include "gc/shared/barrierSet.hpp"
 30 #include "logging/log.hpp"
 31 #include "memory/resourceArea.hpp"
 32 #include "runtime/atomic.hpp"
 33 #include "runtime/orderAccess.hpp"
 34 #include "runtime/thread.hpp"
 35 #include "utilities/debug.hpp"
 36 #include "utilities/spinYield.hpp"
 37 
 38 bool nmethodBarrierState::is_safe() const {
 39   CounterUnion twin_value;
 40   twin_value._value = Atomic::load(&_state);
 41   return twin_value._counters._lock > 0;
 42 }
 43 
 44 bool nmethodBarrierState::enter() {
 45   while (true) {
 46     CounterUnion old;
 47     old._value = Atomic::load(&_state);
 48 
 49     CounterUnion next;
 50     next._value = old._value;
 51 
 52     int disarmed_value = nmethodBarrier::disarmed_value();
 53 
 54     if (old._counters._epoch != disarmed_value) {
 55       next._counters._epoch = disarmed_value;
 56       next._counters._lock = 1;
 57     } else if (old._counters._lock == 0) {
 58       return false;
 59     } else {
 60       ++next._counters._lock;
 61     }
 62 
 63     assert(next._counters._lock > 0, "check");
 64 
 65     if (Atomic::cmpxchg(next._value, &_state, old._value) == old._value) {
 66       return true;
 67     }
 68   }
 69 }
 70 
 71 void nmethodBarrierState::leave() {
 72   while (true) {
 73     CounterUnion old;
 74     old._value = Atomic::load(&_state);
 75 
 76     CounterUnion next;
 77     next._value = old._value;
 78 
 79     --next._counters._lock;
 80 
 81     if (Atomic::cmpxchg(next._value, &_state, old._value) == old._value) {
 82       return;
 83     }
 84   }
 85 }
 86 
 87 void nmethodBarrierState::wait() {
 88   SpinYield yield;
 89   CounterUnion old;
 90   for (;;) {
 91     old._value = Atomic::load(&_state);
 92     if (old._counters._lock == 0) {
 93       return;
 94     }
 95     yield.wait();
 96   }
 97 }
 98 
// Barrier context for an nmethod when no return address is available;
// _return_address_ptr stays NULL.
nmethodBarrier::nmethodBarrier(nmethod* nm)
  : _is_deoptimized(false),
    _nm(nm),
    _return_address_ptr(NULL) { }
103 
// Barrier context for an nmethod entered through the runtime stub;
// return_address_ptr locates the return address of the barrier-taking
// frame.
nmethodBarrier::nmethodBarrier(nmethod* nm, address* return_address_ptr)
  : _is_deoptimized(false),
    _nm(nm),
    _return_address_ptr(return_address_ptr) {
}
109 
110 int nmethodBarrier::enter(address* return_address_ptr) {
111   BarrierSet* bs = BarrierSet::barrier_set();
112   address return_address = *return_address_ptr;
113   CodeBlob* cb = CodeCache::find_blob(return_address);
114   assert(cb != NULL, "invariant");
115   assert(bs->needs_nmethod_entry_barrier(), "how did we get here?");
116 
117   nmethod* nm = cb->as_nmethod();
118   nmethodBarrier nmbarrier(nm, return_address_ptr);
119   return bs->on_nmethod_entry_barrier(nm, &nmbarrier) ? 0 : 1;
120 }
121 
122 void nmethodBarrier::initialize(nmethod* nm) {
123   BarrierSet* bs = BarrierSet::barrier_set();
124   if (bs->needs_nmethod_entry_barrier()) {
125     nm->disarm_barrier();
126   }
127 }
128 
// Disarm the barrier of the associated nmethod.
void nmethodBarrier::disarm() {
  _nm->disarm_barrier();
  // Multiple threads can enter the barrier at the same time while it is
  // armed. One thread can finish and decide that the barrier can be
  // disarmed while, in the meantime, another thread has made the same
  // decision, continued on, and deoptimized on something else. In that
  // race the assert below might fail even though nothing is wrong.
  assert(!_is_deoptimized, "can only disarm methods that didn't deoptimize!");
}
137 
138 int nmethodBarrier::disarmed_value() {
139   BarrierSet* bs = BarrierSet::barrier_set();
140   Thread* thread = Thread::current();
141   int* state_addr = reinterpret_cast<int*>(reinterpret_cast<char*>(thread) +
142                                            in_bytes(bs->nmethod_entry_barrier_state_thread_offset()));
143   return *state_addr;
144 }