< prev index next >

src/share/vm/opto/callnode.cpp

Print this page


   1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/oopMap.hpp"
  28 #include "opto/callGenerator.hpp"
  29 #include "opto/callnode.hpp"
  30 #include "opto/castnode.hpp"
  31 #include "opto/convertnode.hpp"
  32 #include "opto/escape.hpp"
  33 #include "opto/locknode.hpp"
  34 #include "opto/machnode.hpp"
  35 #include "opto/matcher.hpp"
  36 #include "opto/parse.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/regmask.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 
  42 // Portions of code courtesy of Clifford Click
  43 
  44 // Optimization - Graph Style
  45 


1656   Node *result = SafePointNode::Ideal(phase, can_reshape);
1657   if (result != NULL)  return result;
1658   // Don't bother trying to transform a dead node
1659   if (in(0) && in(0)->is_top())  return NULL;
1660 
1661   // Now see if we can optimize away this lock.  We don't actually
1662   // remove the locking here, we simply set the _eliminate flag which
1663   // prevents macro expansion from expanding the lock.  Since we don't
1664   // modify the graph, the value returned from this function is the
1665   // one computed above.
1666   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1667     //
1668     // If we are locking an unescaped object, the lock/unlock is unnecessary
1669     //
1670     ConnectionGraph *cgr = phase->C->congraph();
1671     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1672       assert(!is_eliminated() || is_coarsened(), "sanity");
1673       // The lock could be marked eliminated by lock coarsening
1674       // code during first IGVN before EA. Replace coarsened flag
1675       // to eliminate all associated locks/unlocks.



1676       this->set_non_esc_obj();
1677       return result;
1678     }
1679 
1680     //
1681     // Try lock coarsening
1682     //
1683     PhaseIterGVN* iter = phase->is_IterGVN();
1684     if (iter != NULL && !is_eliminated()) {
1685 
1686       GrowableArray<AbstractLockNode*>   lock_ops;
1687 
1688       Node *ctrl = next_control(in(0));
1689 
1690       // now search back for a matching Unlock
1691       if (find_matching_unlock(ctrl, this, lock_ops)) {
1692         // found an unlock directly preceding this lock.  This is the
1693         // case of single unlock directly control dependent on a
1694         // single lock which is the trivial version of case 1 or 2.
1695       } else if (ctrl->is_Region() ) {


1717           for (int i = 0; i < lock_ops.length(); i++) {
1718             AbstractLockNode* lock = lock_ops.at(i);
1719             if (lock->Opcode() == Op_Lock)
1720               locks++;
1721             else
1722               unlocks++;
1723             if (Verbose) {
1724               lock->dump(1);
1725             }
1726           }
1727           tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1728         }
1729   #endif
1730 
1731         // for each of the identified locks, mark them
1732         // as eliminatable
1733         for (int i = 0; i < lock_ops.length(); i++) {
1734           AbstractLockNode* lock = lock_ops.at(i);
1735 
1736           // Mark it eliminated by coarsening and update any counters



1737           lock->set_coarsened();
1738         }
1739       } else if (ctrl->is_Region() &&
1740                  iter->_worklist.member(ctrl)) {
1741         // We weren't able to find any opportunities but the region this
1742         // lock is control dependent on hasn't been processed yet so put
1743         // this lock back on the worklist so we can check again once any
1744         // region simplification has occurred.
1745         iter->_worklist.push(this);
1746       }
1747     }
1748   }
1749 
1750   return result;
1751 }
1752 
1753 //=============================================================================
bool LockNode::is_nested_lock_region() {
  // Decide whether this lock is "nested": the locked object is already
  // held by a monitor at a smaller stack slot in this activation's JVM
  // state (presumably an enclosing lock — confirm slot-ordering invariant
  // against BoxLockNode).
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0)
    return false; // External lock or it is not Box (Phi node).

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  // Require that 'box' delimits a simple lock region whose unique lock
  // is exactly this node; anything else is too complex to reason about.
  if (!box->is_simple_lock_region(&unique_lock, obj) ||
      (unique_lock != this)) {
    return false;
  }

  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  // Walk every inlining depth captured in the JVM state.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon  = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      // Same (uncast-equivalent) object held at a smaller stack slot:
      // an enclosing lock exists, so this lock is nested.
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
  return false;
}
1786 
1787 //=============================================================================
// Virtual size query: byte size of the concrete node type (NOTE(review):
// presumably consumed when nodes are copied — confirm against Node::clone()).
uint UnlockNode::size_of() const { return sizeof(*this); }
1789 
1790 //=============================================================================
1791 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1792 
1793   // perform any generic optimizations first (returns 'this' or NULL)
1794   Node *result = SafePointNode::Ideal(phase, can_reshape);
1795   if (result != NULL)  return result;
1796   // Don't bother trying to transform a dead node
1797   if (in(0) && in(0)->is_top())  return NULL;
1798 
1799   // Now see if we can optimize away this unlock.  We don't actually
1800   // remove the unlocking here, we simply set the _eliminate flag which
1801   // prevents macro expansion from expanding the unlock.  Since we don't
1802   // modify the graph, the value returned from this function is the
1803   // one computed above.
1804   // Escape state is defined after Parse phase.
1805   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1806     //
1807     // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
1808     //
1809     ConnectionGraph *cgr = phase->C->congraph();
1810     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1811       assert(!is_eliminated() || is_coarsened(), "sanity");
1812       // The lock could be marked eliminated by lock coarsening
1813       // code during first IGVN before EA. Replace coarsened flag
1814       // to eliminate all associated locks/unlocks.



1815       this->set_non_esc_obj();
1816     }
1817   }
1818   return result;
1819 }
1820 





























// Construct an ArrayCopyNode in its initial state: kind 'None', arguments
// not yet validated.  Registered with the compilation as a macro node
// (NOTE(review): presumably expanded later by the macro expansion phase —
// confirm in Compile/PhaseMacroExpand).
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _kind(None),
    _arguments_validated(false) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}
1830 
// Virtual size query: byte size of the concrete node type (NOTE(review):
// presumably consumed when nodes are copied — confirm against Node::clone()).
uint ArrayCopyNode::size_of() const { return sizeof(*this); }
1832 
1833 ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
1834                                    Node* src, Node* src_offset,
1835                                    Node* dest, Node* dest_offset,
1836                                    Node* length,
1837                                    bool alloc_tightly_coupled,
1838                                    Node* src_klass, Node* dest_klass,
1839                                    Node* src_length, Node* dest_length) {
1840 


   1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "opto/callGenerator.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/convertnode.hpp"
  33 #include "opto/escape.hpp"
  34 #include "opto/locknode.hpp"
  35 #include "opto/machnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/regalloc.hpp"
  39 #include "opto/regmask.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 
  43 // Portions of code courtesy of Clifford Click
  44 
  45 // Optimization - Graph Style
  46 


1657   Node *result = SafePointNode::Ideal(phase, can_reshape);
1658   if (result != NULL)  return result;
1659   // Don't bother trying to transform a dead node
1660   if (in(0) && in(0)->is_top())  return NULL;
1661 
1662   // Now see if we can optimize away this lock.  We don't actually
1663   // remove the locking here, we simply set the _eliminate flag which
1664   // prevents macro expansion from expanding the lock.  Since we don't
1665   // modify the graph, the value returned from this function is the
1666   // one computed above.
1667   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1668     //
1669     // If we are locking an unescaped object, the lock/unlock is unnecessary
1670     //
1671     ConnectionGraph *cgr = phase->C->congraph();
1672     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1673       assert(!is_eliminated() || is_coarsened(), "sanity");
1674       // The lock could be marked eliminated by lock coarsening
1675       // code during first IGVN before EA. Replace coarsened flag
1676       // to eliminate all associated locks/unlocks.
1677 #ifdef ASSERT
1678       this->log_lock_optimization(phase,"eliminate_lock_set_non_esc1");
1679 #endif
1680       this->set_non_esc_obj();
1681       return result;
1682     }
1683 
1684     //
1685     // Try lock coarsening
1686     //
1687     PhaseIterGVN* iter = phase->is_IterGVN();
1688     if (iter != NULL && !is_eliminated()) {
1689 
1690       GrowableArray<AbstractLockNode*>   lock_ops;
1691 
1692       Node *ctrl = next_control(in(0));
1693 
1694       // now search back for a matching Unlock
1695       if (find_matching_unlock(ctrl, this, lock_ops)) {
1696         // found an unlock directly preceding this lock.  This is the
1697         // case of single unlock directly control dependent on a
1698         // single lock which is the trivial version of case 1 or 2.
1699       } else if (ctrl->is_Region() ) {


1721           for (int i = 0; i < lock_ops.length(); i++) {
1722             AbstractLockNode* lock = lock_ops.at(i);
1723             if (lock->Opcode() == Op_Lock)
1724               locks++;
1725             else
1726               unlocks++;
1727             if (Verbose) {
1728               lock->dump(1);
1729             }
1730           }
1731           tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1732         }
1733   #endif
1734 
1735         // for each of the identified locks, mark them
1736         // as eliminatable
1737         for (int i = 0; i < lock_ops.length(); i++) {
1738           AbstractLockNode* lock = lock_ops.at(i);
1739 
1740           // Mark it eliminated by coarsening and update any counters
1741 #ifdef ASSERT
1742           lock->log_lock_optimization(phase, "eliminate_lock_set_coarsened");
1743 #endif
1744           lock->set_coarsened();
1745         }
1746       } else if (ctrl->is_Region() &&
1747                  iter->_worklist.member(ctrl)) {
1748         // We weren't able to find any opportunities but the region this
1749         // lock is control dependent on hasn't been processed yet so put
1750         // this lock back on the worklist so we can check again once any
1751         // region simplification has occurred.
1752         iter->_worklist.push(this);
1753       }
1754     }
1755   }
1756 
1757   return result;
1758 }
1759 
1760 //=============================================================================
// Convenience overload: passes NULL for the Phase, so the logging variant
// below performs no compilation-log output.
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(NULL);
}
1764 
// p is used for access to compilation log; no logging if NULL
// Decide whether this lock is "nested": the locked object is already held
// by a monitor at a smaller stack slot in this activation's JVM state
// (presumably an enclosing lock — confirm slot-ordering invariant against
// BoxLockNode).
bool LockNode::is_nested_lock_region(Phase * p) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(p, "eliminate_lock_INLR_1");
#endif
    return false; // External lock or it is not Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  // Require that 'box' delimits a simple lock region ...
  if (!box->is_simple_lock_region(&unique_lock, obj)) {
#ifdef ASSERT
    this->log_lock_optimization(p, "eliminate_lock_INLR_2a");
#endif
    return false;
  }
  // ... and that its unique lock is exactly this node.
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(p, "eliminate_lock_INLR_2b");
#endif
    return false;
  }

  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  // Walk every inlining depth captured in the JVM state.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon  = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      // Same (uncast-equivalent) object held at a smaller stack slot:
      // an enclosing lock exists, so this lock is nested.
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(p, "eliminate_lock_INLR_3");
#endif
  return false;
}
1813 
1814 //=============================================================================
// Virtual size query: byte size of the concrete node type (NOTE(review):
// presumably consumed when nodes are copied — confirm against Node::clone()).
uint UnlockNode::size_of() const { return sizeof(*this); }
1816 
1817 //=============================================================================
// Idealize this Unlock: delegate to SafePointNode::Ideal first, then try to
// flag the unlock for elimination when escape analysis proves the object
// does not globally escape.
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      // Debug-only record of the elimination decision in the compile log.
      this->log_lock_optimization(phase, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}
1850 
1851 const char * AbstractLockNode::kind_as_string() const {
1852   return is_coarsened()   ? "coarsened" :
1853          is_nested()      ? "nested" :
1854          is_non_esc_obj() ? "non_escaping" :
1855          "?";
1856 }
1857 
1858 void AbstractLockNode::log_lock_optimization(Phase *phase, const char * tag)  const {
1859   if (phase == NULL) {
1860     return;
1861   }
1862   Compile * C = phase->C;
1863   CompileLog* log = C->log();
1864   if (log != NULL) {
1865     log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
1866           tag, is_Lock(), C->compile_id(),
1867           is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
1868           kind_as_string());
1869     log->stamp();
1870     log->end_head();
1871     JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
1872     while (p != NULL) {
1873       log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1874       p = p->caller();
1875     }
1876     log->tail(tag);
1877   }
1878 }
1879 
// Construct an ArrayCopyNode in its initial state: kind 'None', arguments
// not yet validated.  Registered with the compilation as a macro node
// (NOTE(review): presumably expanded later by the macro expansion phase —
// confirm in Compile/PhaseMacroExpand).
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _kind(None),
    _arguments_validated(false) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}
1889 
// Virtual size query: byte size of the concrete node type (NOTE(review):
// presumably consumed when nodes are copied — confirm against Node::clone()).
uint ArrayCopyNode::size_of() const { return sizeof(*this); }
1891 
1892 ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
1893                                    Node* src, Node* src_offset,
1894                                    Node* dest, Node* dest_offset,
1895                                    Node* length,
1896                                    bool alloc_tightly_coupled,
1897                                    Node* src_klass, Node* dest_klass,
1898                                    Node* src_length, Node* dest_length) {
1899 


< prev index next >