< prev index next >

src/share/vm/opto/callnode.cpp

Print this page


   1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "ci/bcEscapeAnalyzer.hpp"
  27 #include "compiler/oopMap.hpp"
  28 #include "opto/callGenerator.hpp"
  29 #include "opto/callnode.hpp"
  30 #include "opto/castnode.hpp"
  31 #include "opto/convertnode.hpp"
  32 #include "opto/escape.hpp"
  33 #include "opto/locknode.hpp"
  34 #include "opto/machnode.hpp"
  35 #include "opto/matcher.hpp"
  36 #include "opto/parse.hpp"
  37 #include "opto/regalloc.hpp"
  38 #include "opto/regmask.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 
  42 // Portions of code courtesy of Clifford Click
  43 
  44 // Optimization - Graph Style
  45 


1656   Node *result = SafePointNode::Ideal(phase, can_reshape);
1657   if (result != NULL)  return result;
1658   // Don't bother trying to transform a dead node
1659   if (in(0) && in(0)->is_top())  return NULL;
1660 
1661   // Now see if we can optimize away this lock.  We don't actually
1662   // remove the locking here, we simply set the _eliminate flag which
1663   // prevents macro expansion from expanding the lock.  Since we don't
1664   // modify the graph, the value returned from this function is the
1665   // one computed above.
1666   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1667     //
1668     // If we are locking an unescaped object, the lock/unlock is unnecessary
1669     //
1670     ConnectionGraph *cgr = phase->C->congraph();
1671     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1672       assert(!is_eliminated() || is_coarsened(), "sanity");
1673       // The lock could be marked eliminated by lock coarsening
1674       // code during first IGVN before EA. Replace coarsened flag
1675       // to eliminate all associated locks/unlocks.



1676       this->set_non_esc_obj();
1677       return result;
1678     }
1679 
1680     //
1681     // Try lock coarsening
1682     //
1683     PhaseIterGVN* iter = phase->is_IterGVN();
1684     if (iter != NULL && !is_eliminated()) {
1685 
1686       GrowableArray<AbstractLockNode*>   lock_ops;
1687 
1688       Node *ctrl = next_control(in(0));
1689 
1690       // now search back for a matching Unlock
1691       if (find_matching_unlock(ctrl, this, lock_ops)) {
1692         // found an unlock directly preceding this lock.  This is the
1693         // case of single unlock directly control dependent on a
1694         // single lock which is the trivial version of case 1 or 2.
1695       } else if (ctrl->is_Region() ) {


1717           for (int i = 0; i < lock_ops.length(); i++) {
1718             AbstractLockNode* lock = lock_ops.at(i);
1719             if (lock->Opcode() == Op_Lock)
1720               locks++;
1721             else
1722               unlocks++;
1723             if (Verbose) {
1724               lock->dump(1);
1725             }
1726           }
1727           tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1728         }
1729   #endif
1730 
1731         // for each of the identified locks, mark them
1732         // as eliminatable
1733         for (int i = 0; i < lock_ops.length(); i++) {
1734           AbstractLockNode* lock = lock_ops.at(i);
1735 
1736           // Mark it eliminated by coarsening and update any counters



1737           lock->set_coarsened();
1738         }
1739       } else if (ctrl->is_Region() &&
1740                  iter->_worklist.member(ctrl)) {
1741         // We weren't able to find any opportunities but the region this
1742         // lock is control dependent on hasn't been processed yet so put
1743         // this lock back on the worklist so we can check again once any
1744         // region simplification has occurred.
1745         iter->_worklist.push(this);
1746       }
1747     }
1748   }
1749 
1750   return result;
1751 }
1752 
1753 //=============================================================================
1754 bool LockNode::is_nested_lock_region() {
1755   BoxLockNode* box = box_node()->as_BoxLock();
1756   int stk_slot = box->stack_slot();


1767 
1768   // Look for external lock for the same object.
1769   SafePointNode* sfn = this->as_SafePoint();
1770   JVMState* youngest_jvms = sfn->jvms();
1771   int max_depth = youngest_jvms->depth();
1772   for (int depth = 1; depth <= max_depth; depth++) {
1773     JVMState* jvms = youngest_jvms->of_depth(depth);
1774     int num_mon  = jvms->nof_monitors();
1775     // Loop over monitors
1776     for (int idx = 0; idx < num_mon; idx++) {
1777       Node* obj_node = sfn->monitor_obj(jvms, idx);
1778       BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1779       if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1780         return true;
1781       }
1782     }
1783   }
1784   return false;
1785 }
1786 










































1787 //=============================================================================
1788 uint UnlockNode::size_of() const { return sizeof(*this); }
1789 
1790 //=============================================================================
1791 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1792 
1793   // perform any generic optimizations first (returns 'this' or NULL)
1794   Node *result = SafePointNode::Ideal(phase, can_reshape);
1795   if (result != NULL)  return result;
1796   // Don't bother trying to transform a dead node
1797   if (in(0) && in(0)->is_top())  return NULL;
1798 
1799   // Now see if we can optimize away this unlock.  We don't actually
1800   // remove the unlocking here, we simply set the _eliminate flag which
1801   // prevents macro expansion from expanding the unlock.  Since we don't
1802   // modify the graph, the value returned from this function is the
1803   // one computed above.
1804   // Escape state is defined after Parse phase.
1805   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1806     //
1807     // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
1808     //
1809     ConnectionGraph *cgr = phase->C->congraph();
1810     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1811       assert(!is_eliminated() || is_coarsened(), "sanity");
1812       // The lock could be marked eliminated by lock coarsening
1813       // code during first IGVN before EA. Replace coarsened flag
1814       // to eliminate all associated locks/unlocks.



1815       this->set_non_esc_obj();
1816     }
1817   }
1818   return result;
1819 }
1820 

























// Construct an ArrayCopy call node.  The node starts with kind None and
// unvalidated arguments; _alloc_tightly_coupled records whether the copy
// immediately follows the allocation of its destination.
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _kind(None),
    _arguments_validated(false) {
  init_class_id(Class_ArrayCopy);
  // ArrayCopy is a macro node: register it so macro expansion processes it.
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}
1830 
1831 uint ArrayCopyNode::size_of() const { return sizeof(*this); }
1832 
1833 ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
1834                                    Node* src, Node* src_offset,
1835                                    Node* dest, Node* dest_offset,
1836                                    Node* length,
1837                                    bool alloc_tightly_coupled,
1838                                    Node* src_klass, Node* dest_klass,
1839                                    Node* src_length, Node* dest_length) {
1840 


   1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "compiler/compileLog.hpp"
  27 #include "ci/bcEscapeAnalyzer.hpp"
  28 #include "compiler/oopMap.hpp"
  29 #include "opto/callGenerator.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/convertnode.hpp"
  33 #include "opto/escape.hpp"
  34 #include "opto/locknode.hpp"
  35 #include "opto/machnode.hpp"
  36 #include "opto/matcher.hpp"
  37 #include "opto/parse.hpp"
  38 #include "opto/regalloc.hpp"
  39 #include "opto/regmask.hpp"
  40 #include "opto/rootnode.hpp"
  41 #include "opto/runtime.hpp"
  42 
  43 // Portions of code courtesy of Clifford Click
  44 
  45 // Optimization - Graph Style
  46 


1657   Node *result = SafePointNode::Ideal(phase, can_reshape);
1658   if (result != NULL)  return result;
1659   // Don't bother trying to transform a dead node
1660   if (in(0) && in(0)->is_top())  return NULL;
1661 
1662   // Now see if we can optimize away this lock.  We don't actually
1663   // remove the locking here, we simply set the _eliminate flag which
1664   // prevents macro expansion from expanding the lock.  Since we don't
1665   // modify the graph, the value returned from this function is the
1666   // one computed above.
1667   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1668     //
1669     // If we are locking an unescaped object, the lock/unlock is unnecessary
1670     //
1671     ConnectionGraph *cgr = phase->C->congraph();
1672     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1673       assert(!is_eliminated() || is_coarsened(), "sanity");
1674       // The lock could be marked eliminated by lock coarsening
1675       // code during first IGVN before EA. Replace coarsened flag
1676       // to eliminate all associated locks/unlocks.
1677 #ifdef ASSERT
1678       this->log_lock_optimization(phase,"eliminate_lock_set_non_esc1");
1679 #endif
1680       this->set_non_esc_obj();
1681       return result;
1682     }
1683 
1684     //
1685     // Try lock coarsening
1686     //
1687     PhaseIterGVN* iter = phase->is_IterGVN();
1688     if (iter != NULL && !is_eliminated()) {
1689 
1690       GrowableArray<AbstractLockNode*>   lock_ops;
1691 
1692       Node *ctrl = next_control(in(0));
1693 
1694       // now search back for a matching Unlock
1695       if (find_matching_unlock(ctrl, this, lock_ops)) {
1696         // found an unlock directly preceding this lock.  This is the
1697         // case of single unlock directly control dependent on a
1698         // single lock which is the trivial version of case 1 or 2.
1699       } else if (ctrl->is_Region() ) {


1721           for (int i = 0; i < lock_ops.length(); i++) {
1722             AbstractLockNode* lock = lock_ops.at(i);
1723             if (lock->Opcode() == Op_Lock)
1724               locks++;
1725             else
1726               unlocks++;
1727             if (Verbose) {
1728               lock->dump(1);
1729             }
1730           }
1731           tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1732         }
1733   #endif
1734 
1735         // for each of the identified locks, mark them
1736         // as eliminatable
1737         for (int i = 0; i < lock_ops.length(); i++) {
1738           AbstractLockNode* lock = lock_ops.at(i);
1739 
1740           // Mark it eliminated by coarsening and update any counters
1741 #ifdef ASSERT
1742           lock->log_lock_optimization(phase, "eliminate_lock_set_coarsened");
1743 #endif
1744           lock->set_coarsened();
1745         }
1746       } else if (ctrl->is_Region() &&
1747                  iter->_worklist.member(ctrl)) {
1748         // We weren't able to find any opportunities but the region this
1749         // lock is control dependent on hasn't been processed yet so put
1750         // this lock back on the worklist so we can check again once any
1751         // region simplification has occurred.
1752         iter->_worklist.push(this);
1753       }
1754     }
1755   }
1756 
1757   return result;
1758 }
1759 
1760 //=============================================================================
1761 bool LockNode::is_nested_lock_region() {
1762   BoxLockNode* box = box_node()->as_BoxLock();
1763   int stk_slot = box->stack_slot();


1774 
1775   // Look for external lock for the same object.
1776   SafePointNode* sfn = this->as_SafePoint();
1777   JVMState* youngest_jvms = sfn->jvms();
1778   int max_depth = youngest_jvms->depth();
1779   for (int depth = 1; depth <= max_depth; depth++) {
1780     JVMState* jvms = youngest_jvms->of_depth(depth);
1781     int num_mon  = jvms->nof_monitors();
1782     // Loop over monitors
1783     for (int idx = 0; idx < num_mon; idx++) {
1784       Node* obj_node = sfn->monitor_obj(jvms, idx);
1785       BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1786       if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1787         return true;
1788       }
1789     }
1790   }
1791   return false;
1792 }
1793 
#ifdef ASSERT
// Debug-build variant of is_nested_lock_region(): performs the same checks
// but logs the reason whenever this lock is rejected as a nested lock region.
bool LockNode::is_nested_lock_region_debug(Phase * p) {
  BoxLockNode* box = box_node()->as_BoxLock();
  const int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
    this->log_lock_optimization(p, "eliminate_lock_INLR_1");
    return false; // External lock or it is not Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj)) {
    this->log_lock_optimization(p, "eliminate_lock_INLR_2a");
    return false;
  }
  if (unique_lock != this) {
    this->log_lock_optimization(p, "eliminate_lock_INLR_2b");
    return false;
  }

  // Look for an external lock of the same object: scan the monitors of every
  // JVM state in the inlining chain for one held in a lower stack slot.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  for (int depth = 1; depth <= youngest_jvms->depth(); depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    for (int idx = 0; idx < jvms->nof_monitors(); idx++) {
      Node* mon_obj = sfn->monitor_obj(jvms, idx);
      BoxLockNode* mon_box = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((mon_box->stack_slot() < stk_slot) && mon_obj->eqv_uncast(obj)) {
        return true; // Found an enclosing lock on the same object.
      }
    }
  }
  this->log_lock_optimization(p, "eliminate_lock_INLR_3");
  return false;
}
#endif
1835 
1836 //=============================================================================
1837 uint UnlockNode::size_of() const { return sizeof(*this); }
1838 
1839 //=============================================================================
1840 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1841 
1842   // perform any generic optimizations first (returns 'this' or NULL)
1843   Node *result = SafePointNode::Ideal(phase, can_reshape);
1844   if (result != NULL)  return result;
1845   // Don't bother trying to transform a dead node
1846   if (in(0) && in(0)->is_top())  return NULL;
1847 
1848   // Now see if we can optimize away this unlock.  We don't actually
1849   // remove the unlocking here, we simply set the _eliminate flag which
1850   // prevents macro expansion from expanding the unlock.  Since we don't
1851   // modify the graph, the value returned from this function is the
1852   // one computed above.
1853   // Escape state is defined after Parse phase.
1854   if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1855     //
1856     // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
1857     //
1858     ConnectionGraph *cgr = phase->C->congraph();
1859     if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1860       assert(!is_eliminated() || is_coarsened(), "sanity");
1861       // The lock could be marked eliminated by lock coarsening
1862       // code during first IGVN before EA. Replace coarsened flag
1863       // to eliminate all associated locks/unlocks.
1864 #ifdef ASSERT
1865       this->log_lock_optimization(phase, "eliminate_lock_set_non_esc2");
1866 #endif
1867       this->set_non_esc_obj();
1868     }
1869   }
1870   return result;
1871 }
1872 
1873 const char * AbstractLockNode::kind_as_string() const {
1874       return is_coarsened() ? "coarsened" :
1875                   is_nested() ? "nested" :
1876                       is_non_esc_obj() ? "non_escaping" : "?";
1877 }
1878 
1879 void AbstractLockNode::log_lock_optimization(Phase *phase, const char * tag)  const {
1880     Compile * C = phase->C;
1881     CompileLog* log = C->log();
1882     if (log != NULL) {
1883       log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'",
1884             tag, is_Lock(), C->compile_id(),
1885             is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?",
1886             kind_as_string());
1887       log->stamp();
1888       log->end_head();
1889       JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms();
1890       while (p != NULL) {
1891         log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
1892         p = p->caller();
1893       }
1894       log->tail(tag);
1895     }
1896 }
1897 
// Construct an ArrayCopy call node.  The node starts with kind None and
// unvalidated arguments; _alloc_tightly_coupled records whether the copy
// immediately follows the allocation of its destination.
ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled)
  : CallNode(arraycopy_type(), NULL, TypeRawPtr::BOTTOM),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _kind(None),
    _arguments_validated(false) {
  init_class_id(Class_ArrayCopy);
  // ArrayCopy is a macro node: register it so macro expansion processes it.
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}
1907 
1908 uint ArrayCopyNode::size_of() const { return sizeof(*this); }
1909 
1910 ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
1911                                    Node* src, Node* src_offset,
1912                                    Node* dest, Node* dest_offset,
1913                                    Node* length,
1914                                    bool alloc_tightly_coupled,
1915                                    Node* src_klass, Node* dest_klass,
1916                                    Node* src_length, Node* dest_length) {
1917 


< prev index next >