< prev index next >
src/hotspot/share/opto/matcher.cpp
Print this page
rev 52490 : 8213745: Don't use memset to initialize array of RegMask in matcher.cpp
rev 52491 : 8213746: GC/C2 abstraction for C2 matcher
@@ -21,10 +21,12 @@
* questions.
*
*/
#include "precompiled.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
@@ -39,13 +41,10 @@
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
-#if INCLUDE_ZGC
-#include "gc/z/zBarrierSetRuntime.hpp"
-#endif // INCLUDE_ZGC
OptoReg::Name OptoReg::c_frame_pointer;
const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
@@ -2069,12 +2068,95 @@
if (nstate == Visit) {
mstack.set_state(Post_Visit);
set_visited(n); // Flag as visited now
bool mem_op = false;
int mem_addr_idx = MemNode::Address;
+ bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_visit(this, mstack, n, nop, mem_op, mem_addr_idx);
+ if (!gc_handled) {
+ if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
+ continue;
+ }
+ }
+ for(int i = n->req() - 1; i >= 0; --i) { // For my children
+ Node *m = n->in(i); // Get ith input
+ if (m == NULL) continue; // Ignore NULLs
+ uint mop = m->Opcode();
+
+ // Must clone all producers of flags, or we will not match correctly.
+ // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
+ // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
+ // are also there, so we may match a float-branch to int-flags and
+ // expect the allocator to haul the flags from the int-side to the
+ // fp-side. No can do.
+ if( _must_clone[mop] ) {
+ mstack.push(m, Visit);
+ continue; // for(int i = ...)
+ }
+
+ if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
+ // Bases used in addresses must be shared but since
+ // they are shared through a DecodeN they may appear
+ // to have a single use so force sharing here.
+ set_shared(m->in(AddPNode::Base)->in(1));
+ }
+
+ // if 'n' and 'm' are part of a graph for BMI instruction, clone this node.
+#ifdef X86
+ if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
+ mstack.push(m, Visit);
+ continue;
+ }
+#endif
+
+ // Clone addressing expressions as they are "free" in memory access instructions
+ if (mem_op && i == mem_addr_idx && mop == Op_AddP &&
+ // When there are other uses besides address expressions
+ // put it on stack and mark as shared.
+ !is_visited(m)) {
+ // Some inputs for address expression are not put on stack
+ // to avoid marking them as shared and forcing them into register
+ // if they are used only in address expressions.
+ // But they should be marked as shared if there are other uses
+ // besides address expressions.
+
+ if (clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
+ continue;
+ }
+ } // if( mem_op &&
+ mstack.push(m, Pre_Visit);
+ } // for(int i = ...)
+ }
+ else if (nstate == Alt_Post_Visit) {
+ mstack.pop(); // Remove node from stack
+ // We cannot remove the Cmp input from the Bool here, as the Bool may be
+ // shared and all users of the Bool need to move the Cmp in parallel.
+ // This leaves both the Bool and the If pointing at the Cmp. To
+ // prevent the Matcher from trying to Match the Cmp along both paths
+ // BoolNode::match_edge always returns a zero.
+
+ // We reorder the Op_If in a pre-order manner, so we can visit without
+ // accidentally sharing the Cmp (the Bool and the If make 2 users).
+ n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
+ }
+ else if (nstate == Post_Visit) {
+ mstack.pop(); // Remove node from stack
- switch( nop ) { // Handle some opcodes special
+ // Now hack a few special opcodes
+ uint opcode = n->Opcode();
+ bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
+ if (!gc_handled) {
+ find_shared_post_visit(n, opcode);
+ }
+ }
+ else {
+ ShouldNotReachHere();
+ }
+ } // end of while (mstack.is_nonempty())
+}
+
+bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
+ switch(opcode) { // Handle some opcodes special
case Op_Phi: // Treat Phis as shared roots
case Op_Parm:
case Op_Proj: // All handled specially during matching
case Op_SafePointScalarObject:
set_shared(n);
@@ -2088,11 +2170,11 @@
// Bool and CmpX side-by-side, because it can only get at constants
// that are at the leaves of Match trees, and the Bool's condition acts
// as a constant here.
mstack.push(n->in(1), Visit); // Clone the Bool
mstack.push(n->in(0), Pre_Visit); // Visit control input
- continue; // while (mstack.is_nonempty())
+ return true; // fully handled; caller's while (mstack.is_nonempty()) loop continues
case Op_ConvI2D: // These forms efficiently match with a prior
case Op_ConvI2F: // Load but not a following Store
if( n->in(1)->is_Load() && // Prior load
n->outcnt() == 1 && // Not already shared
n->unique_out()->is_Store() ) // Following store
@@ -2118,11 +2200,11 @@
set_dontcare(n);
break;
case Op_Jump:
mstack.push(n->in(1), Pre_Visit); // Switch Value (could be shared)
mstack.push(n->in(0), Pre_Visit); // Visit Control input
- continue; // while (mstack.is_nonempty())
+ return true; // fully handled; caller's while (mstack.is_nonempty()) loop continues
case Op_StrComp:
case Op_StrEquals:
case Op_StrIndexOf:
case Op_StrIndexOfChar:
case Op_AryEq:
@@ -2157,21 +2239,10 @@
break;
case Op_ClearArray:
case Op_SafePoint:
mem_op = true;
break;
-#if INCLUDE_ZGC
- case Op_CallLeaf:
- if (UseZGC) {
- if (n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr() ||
- n->as_Call()->entry_point() == ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr()) {
- mem_op = true;
- mem_addr_idx = TypeFunc::Parms+1;
- }
- break;
- }
-#endif
default:
if( n->is_Store() ) {
// Do match stores, despite no ideal reg
mem_op = true;
break;
@@ -2184,77 +2255,15 @@
}
// Fall into default case
if( !n->ideal_reg() )
set_dontcare(n); // Unmatchable Nodes
} // end_switch
+ return false;
+}
- for(int i = n->req() - 1; i >= 0; --i) { // For my children
- Node *m = n->in(i); // Get ith input
- if (m == NULL) continue; // Ignore NULLs
- uint mop = m->Opcode();
-
- // Must clone all producers of flags, or we will not match correctly.
- // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
- // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
- // are also there, so we may match a float-branch to int-flags and
- // expect the allocator to haul the flags from the int-side to the
- // fp-side. No can do.
- if( _must_clone[mop] ) {
- mstack.push(m, Visit);
- continue; // for(int i = ...)
- }
-
- if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
- // Bases used in addresses must be shared but since
- // they are shared through a DecodeN they may appear
- // to have a single use so force sharing here.
- set_shared(m->in(AddPNode::Base)->in(1));
- }
-
- // if 'n' and 'm' are part of a graph for BMI instruction, clone this node.
-#ifdef X86
- if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
- mstack.push(m, Visit);
- continue;
- }
-#endif
-
- // Clone addressing expressions as they are "free" in memory access instructions
- if (mem_op && i == mem_addr_idx && mop == Op_AddP &&
- // When there are other uses besides address expressions
- // put it on stack and mark as shared.
- !is_visited(m)) {
- // Some inputs for address expression are not put on stack
- // to avoid marking them as shared and forcing them into register
- // if they are used only in address expressions.
- // But they should be marked as shared if there are other uses
- // besides address expressions.
-
- if (clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
- continue;
- }
- } // if( mem_op &&
- mstack.push(m, Pre_Visit);
- } // for(int i = ...)
- }
- else if (nstate == Alt_Post_Visit) {
- mstack.pop(); // Remove node from stack
- // We cannot remove the Cmp input from the Bool here, as the Bool may be
- // shared and all users of the Bool need to move the Cmp in parallel.
- // This leaves both the Bool and the If pointing at the Cmp. To
- // prevent the Matcher from trying to Match the Cmp along both paths
- // BoolNode::match_edge always returns a zero.
-
- // We reorder the Op_If in a pre-order manner, so we can visit without
- // accidentally sharing the Cmp (the Bool and the If make 2 users).
- n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
- }
- else if (nstate == Post_Visit) {
- mstack.pop(); // Remove node from stack
-
- // Now hack a few special opcodes
- switch( n->Opcode() ) { // Handle some opcodes special
+void Matcher::find_shared_post_visit(Node* n, uint opcode) {
+ switch(opcode) { // Handle some opcodes special
case Op_StorePConditional:
case Op_StoreIConditional:
case Op_StoreLConditional:
case Op_CompareAndExchangeB:
case Op_CompareAndExchangeS:
@@ -2272,14 +2281,14 @@
case Op_CompareAndSwapS:
case Op_CompareAndSwapI:
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN: { // Convert trinary to binary-tree
- Node *newval = n->in(MemNode::ValueIn );
- Node *oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
- Node *pair = new BinaryNode( oldval, newval );
- n->set_req(MemNode::ValueIn,pair);
+ Node* newval = n->in(MemNode::ValueIn);
+ Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
+ Node* pair = new BinaryNode(oldval, newval);
+ n->set_req(MemNode::ValueIn, pair);
n->del_req(LoadStoreConditionalNode::ExpectedIn);
break;
}
case Op_CMoveD: // Convert trinary to binary-tree
case Op_CMoveF:
@@ -2291,38 +2300,38 @@
case Op_CMoveVD: {
// Restructure into a binary tree for Matching. It's possible that
// we could move this code up next to the graph reshaping for IfNodes
// or vice-versa, but I do not want to debug this for Ladybird.
// 10/2/2000 CNC.
- Node *pair1 = new BinaryNode(n->in(1),n->in(1)->in(1));
- n->set_req(1,pair1);
- Node *pair2 = new BinaryNode(n->in(2),n->in(3));
- n->set_req(2,pair2);
+ Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
+ n->set_req(1, pair1);
+ Node* pair2 = new BinaryNode(n->in(2), n->in(3));
+ n->set_req(2, pair2);
n->del_req(3);
break;
}
case Op_LoopLimit: {
- Node *pair1 = new BinaryNode(n->in(1),n->in(2));
- n->set_req(1,pair1);
- n->set_req(2,n->in(3));
+ Node* pair1 = new BinaryNode(n->in(1), n->in(2));
+ n->set_req(1, pair1);
+ n->set_req(2, n->in(3));
n->del_req(3);
break;
}
case Op_StrEquals:
case Op_StrIndexOfChar: {
- Node *pair1 = new BinaryNode(n->in(2),n->in(3));
- n->set_req(2,pair1);
- n->set_req(3,n->in(4));
+ Node* pair1 = new BinaryNode(n->in(2), n->in(3));
+ n->set_req(2, pair1);
+ n->set_req(3, n->in(4));
n->del_req(4);
break;
}
case Op_StrComp:
case Op_StrIndexOf: {
- Node *pair1 = new BinaryNode(n->in(2),n->in(3));
- n->set_req(2,pair1);
- Node *pair2 = new BinaryNode(n->in(4),n->in(5));
- n->set_req(3,pair2);
+ Node* pair1 = new BinaryNode(n->in(2), n->in(3));
+ n->set_req(2, pair1);
+ Node* pair2 = new BinaryNode(n->in(4), n->in(5));
+ n->set_req(3, pair2);
n->del_req(5);
n->del_req(4);
break;
}
case Op_StrCompressedCopy:
@@ -2346,15 +2355,10 @@
break;
}
default:
break;
}
- }
- else {
- ShouldNotReachHere();
- }
- } // end of while (mstack.is_nonempty())
}
#ifdef ASSERT
// machine-independent root to machine-dependent root
void Matcher::dump_old2new_map() {
@@ -2514,11 +2518,12 @@
xop == Op_CompareAndSwapB ||
xop == Op_CompareAndSwapS ||
xop == Op_CompareAndSwapL ||
xop == Op_CompareAndSwapP ||
xop == Op_CompareAndSwapN ||
- xop == Op_CompareAndSwapI) {
+ xop == Op_CompareAndSwapI ||
+ BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
return true;
}
// Op_FastLock previously appeared in the Op_* list above.
// With biased locking we're no longer guaranteed that a monitor
< prev index next >