
src/share/vm/opto/lcm.cpp

rev 9821 : 8146612: C2: Precedence edges specification violated
Reviewed-by:
   1 /*
   2  * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 932             if (oop_store != NULL) {
 933               assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
 934             }
 935           }
 936         }
 937       }
 938 #endif
 939 
 940       // A few node types require changing a required edge to a precedence edge
 941       // before allocation.
 942       if( n->is_Mach() && n->req() > TypeFunc::Parms &&
 943           (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
 944            n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
 945         // MemBarAcquire could be created without Precedent edge.
 946         // del_req() replaces the specified edge with the last input edge
 947         // and then removes the last edge. If the specified edge > number of
 948         // edges the last edge will be moved outside of the input edges array
 949         // and the edge will be lost. This is why this code should be
 950         // executed only when Precedent (== TypeFunc::Parms) edge is present.
 951         Node *x = n->in(TypeFunc::Parms);
 952         n->del_req(TypeFunc::Parms);
 953         n->add_prec(x);
 954       }
 955     }
 956   }
 957   for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
 958     ready_cnt.at_put(block->get_node(i2)->_idx, 0);
 959 
 960   // All the prescheduled guys do not hold back internal nodes
 961   uint i3;
 962   for (i3 = 0; i3 < phi_cnt; i3++) {  // For all pre-scheduled
 963     Node *n = block->get_node(i3);       // Get pre-scheduled
 964     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 965       Node* m = n->fast_out(j);
 966       if (get_block_for_node(m) == block) { // Local-block user
 967         int m_cnt = ready_cnt.at(m->_idx)-1;
 968         if (OptoRegScheduling && block_size_threshold_ok) {
 969           // mark m as scheduled
 970           if (m_cnt < 0) {
 971             m->add_flag(Node::Flag_is_scheduled);
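The del_req() comment in the hunk above (this is the original file; the patched copy follows below) describes a swap-with-last removal: the input at the given slot is overwritten by the last input and the array shrinks by one, so deleting a slot that is not actually populated would silently drop the real last input. That is why the guard requires n->req() > TypeFunc::Parms. A minimal toy sketch of that behavior, assuming a hypothetical ToyNode rather than the real HotSpot Node class:

#include <cassert>
#include <vector>

// Hypothetical stand-in for a node; not the HotSpot Node class.
struct ToyNode {
  std::vector<ToyNode*> in;     // required input edges

  size_t req() const { return in.size(); }

  // Mirrors the swap-with-last removal the source comment describes:
  // slot idx is overwritten by the last input, then the last slot is dropped.
  void del_req(size_t idx) {
    assert(idx < in.size() && "slot must exist, or the real last input is lost");
    in[idx] = in.back();
    in.pop_back();
  }
};

int main() {
  ToyNode a, b, c, n;
  n.in = { &a, &b, &c };
  n.del_req(1);                 // slot 1 now holds what used to be last (&c)
  assert(n.req() == 2 && n.in[1] == &c);
  return 0;
}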


   1 /*
   2  * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


 932             if (oop_store != NULL) {
 933               assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
 934             }
 935           }
 936         }
 937       }
 938 #endif
 939 
 940       // A few node types require changing a required edge to a precedence edge
 941       // before allocation.
 942       if( n->is_Mach() && n->req() > TypeFunc::Parms &&
 943           (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
 944            n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
 945         // MemBarAcquire could be created without Precedent edge.
 946         // del_req() replaces the specified edge with the last input edge
 947         // and then removes the last edge. If the specified edge > number of
 948         // edges the last edge will be moved outside of the input edges array
 949         // and the edge will be lost. This is why this code should be
 950         // executed only when Precedent (== TypeFunc::Parms) edge is present.
 951         Node *x = n->in(TypeFunc::Parms);
 952         if (x != NULL && get_block_for_node(x) == block && n->find_prec_edge(x) != -1) {
 953           // Old edge to node within same block will get removed, but no precedence
 954           // edge will get added because it already exists. Update ready count.
 955           int cnt = ready_cnt.at(n->_idx);
 956           assert(cnt > 1, "MemBar node %d must not get ready here", n->_idx);
 957           ready_cnt.at_put(n->_idx, cnt-1);
 958         }
 959         n->del_req(TypeFunc::Parms);
 960         n->add_prec(x);
 961       }
 962     }
 963   }
 964   for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
 965     ready_cnt.at_put(block->get_node(i2)->_idx, 0);
 966 
 967   // All the prescheduled guys do not hold back internal nodes
 968   uint i3;
 969   for (i3 = 0; i3 < phi_cnt; i3++) {  // For all pre-scheduled
 970     Node *n = block->get_node(i3);       // Get pre-scheduled
 971     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
 972       Node* m = n->fast_out(j);
 973       if (get_block_for_node(m) == block) { // Local-block user
 974         int m_cnt = ready_cnt.at(m->_idx)-1;
 975         if (OptoRegScheduling && block_size_threshold_ok) {
 976           // mark m as scheduled
 977           if (m_cnt < 0) {
 978             m->add_flag(Node::Flag_is_scheduled);
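The lines added at 952-958 of the patched hunk above cover the case this changeset fixes: when x lives in the same block and n already carries a precedence edge to x, turning the required Precedent input into a precedence edge removes one local dependence without adding a new one, so n's ready count must be decremented by one or the node could never drop to zero and be scheduled. A rough sketch of that bookkeeping, with hypothetical names (ToyMemBar, convert_precedent_edge) standing in for the real scheduler state:

#include <cassert>
#include <unordered_map>
#include <unordered_set>
#include <vector>

// Hypothetical stand-in for a MemBar node; not the HotSpot scheduler.
struct ToyMemBar {
  int idx;
  std::vector<int> req_in;            // required inputs, last one is Precedent
  std::unordered_set<int> prec;       // precedence edges, at most one per node
};

// ready_cnt[i] = number of same-block edges node i still waits on.
void convert_precedent_edge(ToyMemBar& n,
                            std::unordered_map<int, int>& ready_cnt,
                            bool x_in_same_block) {
  int x = n.req_in.back();            // the Precedent input
  if (x_in_same_block && n.prec.count(x) != 0) {
    // The required edge to x is about to go away and the precedence edge
    // already exists, so n waits on one edge fewer than ready_cnt says.
    int cnt = ready_cnt[n.idx];
    assert(cnt > 1 && "MemBar must not become ready here");
    ready_cnt[n.idx] = cnt - 1;
  }
  n.req_in.pop_back();                // del_req(TypeFunc::Parms)
  n.prec.insert(x);                   // add_prec(x); no-op if already present
}

int main() {
  std::unordered_map<int, int> ready_cnt;
  ToyMemBar n{42, {7, 13}, {13}};     // already has a precedence edge to 13
  ready_cnt[n.idx] = 2;               // two unscheduled local edges feed n
  convert_precedent_edge(n, ready_cnt, /*x_in_same_block=*/true);
  assert(ready_cnt[n.idx] == 1);      // one dependence left, not zero, not two
  return 0;
}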

