/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
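// The box is pinned to a known stack slot (set in the constructor below),
// so its input mask contains exactly that one slot.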
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

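// The box's value is a machine pointer, so any pointer register may hold it;
// use the matcher's generic RegP mask for the output.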
const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}
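
// A minimal usage sketch (assuming the JDK 8-era node allocation style):
// the parser allocates one box per monitor stack slot when it reaches a
// lock, e.g.
//
//   BoxLockNode* box = new (C) BoxLockNode(next_monitor());
//
// See GraphKit::shared_lock() for the real call site.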

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has its own BoxLock node.
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxLockNode after register allocation (RA), which may have
  // spilled the box through SpillCopy nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged,
    // so it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}
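
// For example, after RA the box input of a Lock may look like
//   Lock -> SpillCopy -> Phi -> SpillCopy -> BoxLock
// and the in(1) walk above recovers the original BoxLock.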

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is this BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check the lock's box since the box could also be referenced by the
      // Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
            }
          }
        } else {
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint nodes reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node merging
    // different cast nodes that all point to this locked object.
    // We assume that no other objects are referenced in the monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}
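
// A hedged caller sketch (hypothetical; real callers live in the lock
// elimination code, e.g. around AbstractLockNode during escape analysis):
//
//   LockNode* unique_lock = NULL;
//   if (box->is_simple_lock_region(&unique_lock, obj) && unique_lock != NULL) {
//     // 'box' guards exactly one Lock on 'obj'; the region can be treated
//     // as a single balanced lock/unlock pair.
//   }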

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
uint FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}
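
// Note: an assumption based on the counter's name, not asserted by this file:
// this is typically reached from GraphKit::shared_lock() when precise
// biased-locking statistics are requested (PrintPreciseBiasedLockingStatistics).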

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node* obj = null_check(peek());
  // Check for locking null object
  if (stopped()) return;

  // The monitor object is not part of the debug-info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}
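
// For reference, the bytecodes handled here come from Java source such as
// (illustrative only):
//
//   synchronized (obj) {   // javac emits monitorenter -> do_monitor_enter()
//     ...
//   }                      // javac emits monitorexit  -> do_monitor_exit()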

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence there is no need for a
  // null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}