--- old/src/share/vm/opto/mulnode.cpp	2016-07-11 22:46:43.810386850 +0900
+++ new/src/share/vm/opto/mulnode.cpp	2016-07-11 22:46:43.672387333 +0900
@@ -41,7 +41,7 @@
 // (commute) inputs to MulNodes willy-nilly so the hash function must return
 // the same value in the presence of edge swapping.
 uint MulNode::hash() const {
-  return (uintptr_t)in(1) + (uintptr_t)in(2) + Opcode();
+  return (uintptr_t)in(1) + (uintptr_t)in(2) + static_cast<uint>(Opcode());
 }
 
 //------------------------------Identity---------------------------------------
@@ -78,15 +78,15 @@
 
   // If the right input is a constant, and the left input is a product of a
   // constant, flatten the expression tree.
-  uint op = Opcode();
+  Opcodes op = Opcode();
   if( t2->singleton() &&        // Right input is a constant?
-      op != Op_MulF &&          // Float & double cannot reassociate
-      op != Op_MulD ) {
+      op != Opcodes::Op_MulF &&          // Float & double cannot reassociate
+      op != Opcodes::Op_MulD ) {
     if( t2 == Type::TOP ) return NULL;
     Node *mul1 = in(1);
 #ifdef ASSERT
     // Check for dead loop
-    int op1 = mul1->Opcode();
+    Opcodes op1 = mul1->Opcode();
     if( phase->eqv( mul1, this ) || phase->eqv( in(2), this ) ||
         ( op1 == mul_opcode() || op1 == add_opcode() ) &&
         ( phase->eqv( mul1->in(1), this ) || phase->eqv( mul1->in(2), this ) ||
@@ -148,8 +148,8 @@
 
   // Either input is ZERO ==> the result is ZERO.
   // Not valid for floats or doubles since +0.0 * -0.0 --> +0.0
-  int op = Opcode();
-  if( op == Op_MulI || op == Op_AndI || op == Op_MulL || op == Op_AndL ) {
+  Opcodes op = Opcode();
+  if( op == Opcodes::Op_MulI || op == Opcodes::Op_AndI || op == Opcodes::Op_MulL || op == Opcodes::Op_AndL ) {
     const Type *zero = add_id();        // The multiplicative zero
     if( t1->higher_equal( zero ) ) return zero;
     if( t2->higher_equal( zero ) ) return zero;
@@ -162,7 +162,7 @@
 #if defined(IA32)
   // Can't trust native compilers to properly fold strict double
   // multiplication with round-to-zero on this platform.
-  if (op == Op_MulD && phase->C->method()->is_strict()) {
+  if (op == Opcodes::Op_MulD && phase->C->method()->is_strict()) {
     return TypeD::DOUBLE;
   }
 #endif
@@ -438,7 +438,7 @@
   if (phase->eqv(in(1), in(2))) return in(1);
 
   Node* in1 = in(1);
-  uint op = in1->Opcode();
+  Opcodes op = in1->Opcode();
   const TypeInt* t2 = phase->type(in(2))->isa_int();
   if (t2 && t2->is_con()) {
     int con = t2->get_con();
@@ -451,7 +451,7 @@
     }
     // Masking off the high bits of a unsigned-shift-right is not
     // needed either.
-    if (op == Op_URShiftI) {
+    if (op == Opcodes::Op_URShiftI) {
       const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
       if (t12 && t12->is_con()) {  // Shift is by a constant
         int shift = t12->get_con();
@@ -472,17 +472,17 @@
   if( !t2 || !t2->is_con() ) return MulNode::Ideal(phase, can_reshape);
   const int mask = t2->get_con();
   Node *load = in(1);
-  uint lop = load->Opcode();
+  Opcodes lop = load->Opcode();
 
   // Masking bits off of a Character?  Hi bits are already zero.
-  if( lop == Op_LoadUS &&
+  if( lop == Opcodes::Op_LoadUS &&
       (mask & 0xFFFF0000) )     // Can we make a smaller mask?
     return new AndINode(load,phase->intcon(mask&0xFFFF));
 
   // Masking bits off of a Short?  Loading a Character does some masking
   if (can_reshape &&
       load->outcnt() == 1 && load->unique_out() == this) {
-    if (lop == Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
+    if (lop == Opcodes::Op_LoadS && (mask & 0xFFFF0000) == 0 ) {
       Node* ldus = load->as_Load()->convert_to_unsigned_load(*phase);
       ldus = phase->transform(ldus);
       return new AndINode(ldus, phase->intcon(mask & 0xFFFF));
@@ -490,7 +490,7 @@
 
     // Masking sign bits off of a Byte?  Do an unsigned byte load plus
     // an and.
-    if (lop == Op_LoadB && (mask & 0xFFFFFF00) == 0) {
+    if (lop == Opcodes::Op_LoadB && (mask & 0xFFFFFF00) == 0) {
       Node* ldub = load->as_Load()->convert_to_unsigned_load(*phase);
       ldub = phase->transform(ldub);
       return new AndINode(ldub, phase->intcon(mask));
@@ -498,7 +498,7 @@
   }
 
   // Masking off sign bits?  Dont make them!
-  if( lop == Op_RShiftI ) {
+  if( lop == Opcodes::Op_RShiftI ) {
     const TypeInt *t12 = phase->type(load->in(2))->isa_int();
     if( t12 && t12->is_con() ) { // Shift is by a constant
       int shift = t12->get_con();
@@ -517,7 +517,7 @@
   // Check for 'negate/and-1', a pattern emitted when someone asks for
   // 'mod 2'.  Negate leaves the low order bit unchanged (think: complement
   // plus 1) and the mask is of the low order bit.  Skip the negate.
-  if( lop == Op_SubI && mask == 1 && load->in(1) &&
+  if( lop == Opcodes::Op_SubI && mask == 1 && load->in(1) &&
       phase->type(load->in(1)) == TypeInt::ZERO )
     return new AndINode( load->in(2), in(2) );
 
@@ -571,10 +571,10 @@
       if ((t1_support & con) == t1_support)
         return usr;
     }
-    uint lop = usr->Opcode();
+    Opcodes lop = usr->Opcode();
     // Masking off the high bits of a unsigned-shift-right is not
     // needed either.
-    if( lop == Op_URShiftL ) {
+    if( lop == Opcodes::Op_URShiftL ) {
       const TypeInt *t12 = phase->type( usr->in(2) )->isa_int();
       if( t12 && t12->is_con() ) {  // Shift is by a constant
         int shift = t12->get_con();
@@ -596,21 +596,21 @@
   const jlong mask = t2->get_con();
 
   Node* in1 = in(1);
-  uint op = in1->Opcode();
+  Opcodes op = in1->Opcode();
 
   // Are we masking a long that was converted from an int with a mask
   // that fits in 32-bits?  Commute them and use an AndINode.  Don't
   // convert masks which would cause a sign extension of the integer
   // value.  This check includes UI2L masks (0x00000000FFFFFFFF) which
   // would be optimized away later in Identity.
-  if (op == Op_ConvI2L && (mask & UCONST64(0xFFFFFFFF80000000)) == 0) {
+  if (op == Opcodes::Op_ConvI2L && (mask & UCONST64(0xFFFFFFFF80000000)) == 0) {
     Node* andi = new AndINode(in1->in(1), phase->intcon(mask));
     andi = phase->transform(andi);
     return new ConvI2LNode(andi);
   }
 
   // Masking off sign bits?  Dont make them!
-  if (op == Op_RShiftL) {
+  if (op == Opcodes::Op_RShiftL) {
     const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
     if( t12 && t12->is_con() ) { // Shift is by a constant
       int shift = t12->get_con();
@@ -650,8 +650,8 @@
 
   // Left input is an add of a constant?
   Node *add1 = in(1);
-  int add1_op = add1->Opcode();
-  if( add1_op == Op_AddI ) {    // Left input is an add?
+  Opcodes add1_op = add1->Opcode();
+  if( add1_op == Opcodes::Op_AddI ) {    // Left input is an add?
     assert( add1 != add1->in(1), "dead loop in LShiftINode::Ideal" );
     const TypeInt *t12 = phase->type(add1->in(2))->isa_int();
     if( t12 && t12->is_con() ){ // Left input is an add of a con?
@@ -667,16 +667,16 @@
   }
 
   // Check for "(x>>c0)<<c0" which just masks off low bits
-  if( (add1_op == Op_RShiftI || add1_op == Op_URShiftI ) &&
+  if( (add1_op == Opcodes::Op_RShiftI || add1_op == Opcodes::Op_URShiftI ) &&
       add1->in(2) == in(2) )
     // Convert to "(x & -(1<<c0))"
     return new AndINode(add1->in(1),phase->intcon( -(1<<con)));
 
   // Check for "((x>>c0) & Y)<<c0" which just masks off more low bits
-  if( add1_op == Op_AndI ) {
+  if( add1_op == Opcodes::Op_AndI ) {
     Node *add2 = add1->in(1);
-    int add2_op = add2->Opcode();
-    if( (add2_op == Op_RShiftI || add2_op == Op_URShiftI ) &&
+    Opcodes add2_op = add2->Opcode();
+    if( (add2_op == Opcodes::Op_RShiftI || add2_op == Opcodes::Op_URShiftI ) &&
         add2->in(2) == in(2) ) {
       // Convert to "(x & (Y<<c0))"
       Node *y_sh = phase->transform( new LShiftINode( add1->in(2), in(2) ) );
@@ -687,7 +687,7 @@
   // Check for ((x & ((1<<(32-c0))-1)) << c0) which ANDs off high bits
   // before shifting them away.
   const jint bits_mask = right_n_bits(BitsPerJavaInteger-con);
-  if( add1_op == Op_AndI &&
+  if( add1_op == Opcodes::Op_AndI &&
       phase->type(add1->in(2)) == TypeInt::make( bits_mask ) )
     return new LShiftINode( add1->in(1), in(2) );
 
@@ -762,8 +762,8 @@
 
   // Left input is an add of a constant?
   Node *add1 = in(1);
-  int add1_op = add1->Opcode();
-  if( add1_op == Op_AddL ) {    // Left input is an add?
+  Opcodes add1_op = add1->Opcode();
+  if( add1_op == Opcodes::Op_AddL ) {    // Left input is an add?
     // Avoid dead data cycles from dead loops
     assert( add1 != add1->in(1), "dead loop in LShiftLNode::Ideal" );
     const TypeLong *t12 = phase->type(add1->in(2))->isa_long();
@@ -776,16 +776,16 @@
   }
 
   // Check for "(x>>c0)<<c0" which just masks off low bits
-  if( (add1_op == Op_RShiftL || add1_op == Op_URShiftL ) &&
+  if( (add1_op == Opcodes::Op_RShiftL || add1_op == Opcodes::Op_URShiftL ) &&
      add1->in(2) == in(2) )
    // Convert to "(x & -(1<<c0))"
    return new AndLNode(add1->in(1),phase->longcon( -(CONST64(1)<<con)));
 
  // Check for "((x>>c0) & Y)<<c0" which just masks off more low bits
-  if( add1_op == Op_AndL ) {
+  if( add1_op == Opcodes::Op_AndL ) {
    Node *add2 = add1->in(1);
-    int add2_op = add2->Opcode();
-    if( (add2_op == Op_RShiftL || add2_op == Op_URShiftL ) &&
+    Opcodes add2_op = add2->Opcode();
+    if( (add2_op == Opcodes::Op_RShiftL || add2_op == Opcodes::Op_URShiftL ) &&
        add2->in(2) == in(2) ) {
      // Convert to "(x & (Y<<c0))"
      Node *y_sh = phase->transform( new LShiftLNode( add1->in(2), in(2) ) );
@@ -796,7 +796,7 @@
   // Check for ((x & ((CONST64(1)<<(64-c0))-1)) << c0) which ANDs off high bits
   // before shifting them away.
   const jlong bits_mask = jlong(max_julong >> con);
-  if( add1_op == Op_AndL &&
+  if( add1_op == Opcodes::Op_AndL &&
       phase->type(add1->in(2)) == TypeLong::make( bits_mask ) )
     return new LShiftLNode( add1->in(1), in(2) );
 
@@ -859,7 +859,7 @@
     return in(1);
 
   // Check for useless sign-masking
-  if( in(1)->Opcode() == Op_LShiftI &&
+  if( in(1)->Opcode() == Opcodes::Op_LShiftI &&
       in(1)->req() == 3 &&
       in(1)->in(2) == in(2) &&
       t2->is_con() ) {
@@ -894,7 +894,7 @@
   // Check for (x & 0xFF000000) >> 24, whose mask can be made smaller.
   // Such expressions arise normally from shift chains like (byte)(x >> 24).
   const Node *mask = in(1);
-  if( mask->Opcode() == Op_AndI &&
+  if( mask->Opcode() == Opcodes::Op_AndI &&
       (t3 = phase->type(mask->in(2))->isa_int()) &&
       t3->is_con() ) {
     Node *x = mask->in(1);
@@ -906,13 +906,13 @@
 
   // Check for "(short[i] <<16)>>16" which simply sign-extends
   const Node *shl = in(1);
-  if( shl->Opcode() != Op_LShiftI ) return NULL;
+  if( shl->Opcode() != Opcodes::Op_LShiftI ) return NULL;
 
   if( shift == 16 &&
       (t3 = phase->type(shl->in(2))->isa_int()) &&
       t3->is_con(16) ) {
     Node *ld = shl->in(1);
-    if( ld->Opcode() == Op_LoadS ) {
+    if( ld->Opcode() == Opcodes::Op_LoadS ) {
       // Sign extension is just useless here.  Return a RShiftI of zero instead
       // returning 'ld' directly.  We cannot return an old Node directly as
       // that is the job of 'Identity' calls and Identity calls only work on
@@ -923,7 +923,7 @@
       set_req(2, phase->intcon(0));
       return this;
     } else if( can_reshape &&
-               ld->Opcode() == Op_LoadUS &&
+               ld->Opcode() == Opcodes::Op_LoadUS &&
               ld->outcnt() == 1 && ld->unique_out() == shl)
       // Replace zero-extension-load with sign-extension-load
       return ld->as_Load()->convert_to_signed_load(*phase);
@@ -934,7 +934,7 @@
       (t3 = phase->type(shl->in(2))->isa_int()) &&
       t3->is_con(24) ) {
     Node *ld = shl->in(1);
-    if( ld->Opcode() == Op_LoadB ) {
+    if( ld->Opcode() == Opcodes::Op_LoadB ) {
       // Sign extension is just useless here
       set_req(1, ld);
       set_req(2, phase->intcon(0));
@@ -1068,10 +1068,10 @@
   // Happens during new-array length computation.
   // Safe if 'x' is in the range [0..(max_int>>LogBytesPerWord)]
   Node *add = in(1);
-  if( add->Opcode() == Op_AddI ) {
+  if( add->Opcode() == Opcodes::Op_AddI ) {
     const TypeInt *t2 = phase->type(add->in(2))->isa_int();
     if( t2 && t2->is_con(wordSize - 1) &&
-        add->in(1)->Opcode() == Op_LShiftI ) {
+        add->in(1)->Opcode() == Opcodes::Op_LShiftI ) {
       // Check that shift_counts are LogBytesPerWord
       Node *lshift_count = add->in(1)->in(2);
       const TypeInt *t_lshift_count = phase->type(lshift_count)->isa_int();
@@ -1098,10 +1098,10 @@
   // We'll be wanting the right-shift amount as a mask of that many bits
   const int mask = right_n_bits(BitsPerJavaInteger - con);
 
-  int in1_op = in(1)->Opcode();
+  Opcodes in1_op = in(1)->Opcode();
 
   // Check for ((x>>>a)>>>b) and replace with (x>>>(a+b)) when a+b < 32
-  if( in1_op == Op_URShiftI ) {
+  if( in1_op == Opcodes::Op_URShiftI ) {
     const TypeInt *t12 = phase->type( in(1)->in(2) )->isa_int();
     if( t12 && t12->is_con() ) { // Right input is a constant
       assert( in(1) != in(1)->in(1), "dead loop in URShiftINode::Ideal" );
@@ -1117,9 +1117,9 @@
   // If Q is "X << z" the rounding is useless.  Look for patterns like
   // ((X<<Z) + Y) >>> Z  and replace with (X + Y>>>Z) & Z-mask.
   Node *add = in(1);
-  if( in1_op == Op_AddI ) {
+  if( in1_op == Opcodes::Op_AddI ) {
     Node *lshl = add->in(1);
-    if( lshl->Opcode() == Op_LShiftI &&
+    if( lshl->Opcode() == Opcodes::Op_LShiftI &&
         phase->type(lshl->in(2)) == t2 ) {
       Node *y_z = phase->transform( new URShiftINode(add->in(2),in(2)) );
       Node *sum = phase->transform( new AddINode( lshl->in(1), y_z ) );
@@ -1131,7 +1131,7 @@
   // This shortens the mask.  Also, if we are extracting a high byte and
   // storing it to a buffer, the mask will be removed completely.
   Node *andi = in(1);
-  if( in1_op == Op_AndI ) {
+  if( in1_op == Opcodes::Op_AndI ) {
     const TypeInt *t3 = phase->type( andi->in(2) )->isa_int();
     if( t3 && t3->is_con() ) { // Right input is a constant
       jint mask2 = t3->get_con();
@@ -1147,7 +1147,7 @@
 
   // Check for "(X << z ) >>> z" which simply zero-extends
   Node *shl = in(1);
-  if( in1_op == Op_LShiftI &&
+  if( in1_op == Opcodes::Op_LShiftI &&
       phase->type(shl->in(2)) == t2 )
     return new AndINode( shl->in(1), phase->intcon(mask) );
 
@@ -1250,9 +1250,9 @@
   // If Q is "X << z" the rounding is useless.  Look for patterns like
   // ((X<<Z) + Y) >>> Z  and replace with (X + Y>>>Z) & Z-mask.
   Node *add = in(1);
-  if( add->Opcode() == Op_AddL ) {
+  if( add->Opcode() == Opcodes::Op_AddL ) {
     Node *lshl = add->in(1);
-    if( lshl->Opcode() == Op_LShiftL &&
+    if( lshl->Opcode() == Opcodes::Op_LShiftL &&
         phase->type(lshl->in(2)) == t2 ) {
       Node *y_z = phase->transform( new URShiftLNode(add->in(2),in(2)) );
       Node *sum = phase->transform( new AddLNode( lshl->in(1), y_z ) );
@@ -1264,7 +1264,7 @@
   // This shortens the mask.  Also, if we are extracting a high byte and
   // storing it to a buffer, the mask will be removed completely.
   Node *andi = in(1);
-  if( andi->Opcode() == Op_AndL ) {
+  if( andi->Opcode() == Opcodes::Op_AndL ) {
     const TypeLong *t3 = phase->type( andi->in(2) )->isa_long();
     if( t3 && t3->is_con() ) { // Right input is a constant
       jlong mask2 = t3->get_con();
@@ -1276,7 +1276,7 @@
 
   // Check for "(X << z ) >>> z" which simply zero-extends
   Node *shl = in(1);
-  if( shl->Opcode() == Op_LShiftL &&
+  if( shl->Opcode() == Opcodes::Op_LShiftL &&
       phase->type(shl->in(2)) == t2 )
     return new AndLNode( shl->in(1), phase->longcon(mask) );
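Note (not part of the patch): every hunk above applies the same mechanical change. Opcode() now returns the scoped enum Opcodes instead of an int, so each Op_xxx constant must be qualified as Opcodes::Op_xxx, and any place that needs an integral value (such as MulNode::hash) gets an explicit static_cast. The sketch below illustrates that pattern only; the enumerator list and the helpers node_hash and can_reassociate are made-up names, not HotSpot's actual definitions.

// Illustrative sketch, assuming a scoped Opcodes enum; not HotSpot code.
#include <cstdint>

enum class Opcodes : std::uint32_t { Op_MulI, Op_MulF, Op_MulD, Op_AddI };

// Mirrors the MulNode::hash() change: a scoped enum no longer converts
// implicitly to an integer, so the opcode needs an explicit cast before
// it can be added to the pointer-derived terms.
inline std::uint32_t node_hash(std::uintptr_t in1, std::uintptr_t in2, Opcodes op) {
  return static_cast<std::uint32_t>(in1) +
         static_cast<std::uint32_t>(in2) +
         static_cast<std::uint32_t>(op);
}

// Mirrors the MulNode::Ideal() guard: comparisons must name the enum scope.
inline bool can_reassociate(Opcodes op) {
  return op != Opcodes::Op_MulF && op != Opcodes::Op_MulD;
}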