1 /*
   2  * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
  33 
  34 int BarrierSetNMethod::disarmed_value() const {
  35   return *disarmed_value_address();
  36 }
  37 
  38 bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
  39   if (nm->method()->is_method_handle_intrinsic()) {
  40     return false;
  41   }
  42 
  43   if (!nm->is_native_method() && !nm->is_compiled_by_c2() && !nm->is_compiled_by_c1()) {
  44     return false;
  45   }
  46 
  47   return true;
  48 }
  49 
  50 int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  51   address return_address = *return_address_ptr;
  52   CodeBlob* cb = CodeCache::find_blob(return_address);
  53   assert(cb != NULL, "invariant");
  54 
  55   nmethod* nm = cb->as_nmethod();
  56   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  57 
  58   if (!bs_nm->is_armed(nm)) {
  59     return 0;
  60   }
  61 
  62   assert(!nm->is_osr_method(), "Should not reach here");
  63   // Called upon first entry after being armed
  64   bool may_enter = bs_nm->nmethod_entry_barrier(nm);
  65 
  66   // Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
  67   // a very rare event.
  68   if (DeoptimizeNMethodBarriersALot) {
  69     static volatile uint32_t counter=0;
  70     if (Atomic::add(&counter, 1u) % 3 == 0) {
  71       may_enter = false;
  72     }
  73   }
  74 
  75   if (!may_enter) {
  76     log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
  77     bs_nm->deoptimize(nm, return_address_ptr);
  78   }
  79   return may_enter ? 0 : 1;
  80 }
  81 
  82 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
  83   // This check depends on the invariant that all nmethods that are deoptimized / made not entrant
  84   // are NOT disarmed.
  85   // This invariant is important because a method can be deoptimized after the method have been
  86   // resolved / looked up by OSR by another thread. By not deoptimizing them we guarantee that
  87   // a deoptimized method will always hit the barrier and come to the same conclusion - deoptimize
  88   if (!is_armed(nm)) {
  89     return true;
  90   }
  91 
  92   assert(nm->is_osr_method(), "Should not reach here");
  93   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
  94   return nmethod_entry_barrier(nm);
  95 }