--- old/src/cpu/x86/vm/x86.ad	2016-07-11 22:46:04.937522952 +0900
+++ new/src/cpu/x86/vm/x86.ad	2016-07-11 22:46:04.793523456 +0900
@@ -1659,69 +1659,69 @@
 #endif
 
-const bool Matcher::match_rule_supported(int opcode) {
+const bool Matcher::match_rule_supported(Opcodes opcode) {
   if (!has_match_rule(opcode))
     return false;
 
   bool ret_value = true;
   switch (opcode) {
-    case Op_PopCountI:
-    case Op_PopCountL:
+    case Opcodes::Op_PopCountI:
+    case Opcodes::Op_PopCountL:
       if (!UsePopCountInstruction)
         ret_value = false;
       break;
-    case Op_MulVI:
+    case Opcodes::Op_MulVI:
       if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX
         ret_value = false;
       break;
-    case Op_MulVL:
-    case Op_MulReductionVL:
+    case Opcodes::Op_MulVL:
+    case Opcodes::Op_MulReductionVL:
       if (VM_Version::supports_avx512dq() == false)
         ret_value = false;
       break;
-    case Op_AddReductionVL:
+    case Opcodes::Op_AddReductionVL:
       if (UseAVX < 3) // only EVEX : vector connectivity becomes an issue here
         ret_value = false;
       break;
-    case Op_AddReductionVI:
+    case Opcodes::Op_AddReductionVI:
      if (UseSSE < 3) // requires at least SSE3
        ret_value = false;
      break;
-    case Op_MulReductionVI:
+    case Opcodes::Op_MulReductionVI:
      if (UseSSE < 4) // requires at least SSE4
        ret_value = false;
      break;
-    case Op_AddReductionVF:
-    case Op_AddReductionVD:
-    case Op_MulReductionVF:
-    case Op_MulReductionVD:
+    case Opcodes::Op_AddReductionVF:
+    case Opcodes::Op_AddReductionVD:
+    case Opcodes::Op_MulReductionVF:
+    case Opcodes::Op_MulReductionVD:
       if (UseSSE < 1) // requires at least SSE
         ret_value = false;
       break;
-    case Op_SqrtVD:
+    case Opcodes::Op_SqrtVD:
       if (UseAVX < 1) // enabled for AVX only
         ret_value = false;
       break;
-    case Op_CompareAndSwapL:
+    case Opcodes::Op_CompareAndSwapL:
 #ifdef _LP64
-    case Op_CompareAndSwapP:
+    case Opcodes::Op_CompareAndSwapP:
 #endif
       if (!VM_Version::supports_cx8())
         ret_value = false;
       break;
-    case Op_CMoveVD:
+    case Opcodes::Op_CMoveVD:
       if (UseAVX < 1 || UseAVX > 2)
         ret_value = false;
       break;
-    case Op_StrIndexOf:
+    case Opcodes::Op_StrIndexOf:
       if (!UseSSE42Intrinsics)
         ret_value = false;
       break;
-    case Op_StrIndexOfChar:
+    case Opcodes::Op_StrIndexOfChar:
       if (!UseSSE42Intrinsics)
         ret_value = false;
       break;
-    case Op_OnSpinWait:
+    case Opcodes::Op_OnSpinWait:
       if (VM_Version::supports_on_spin_wait() == false)
         ret_value = false;
       break;
@@ -1730,27 +1730,27 @@
   return ret_value;  // Per default match rules are supported.
 }
 
-const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+const bool Matcher::match_rule_supported_vector(Opcodes opcode, int vlen) {
   // identify extra cases that we might want to provide match rules for
   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
   bool ret_value = match_rule_supported(opcode);
   if (ret_value) {
     switch (opcode) {
-      case Op_AddVB:
-      case Op_SubVB:
+      case Opcodes::Op_AddVB:
+      case Opcodes::Op_SubVB:
         if ((vlen == 64) && (VM_Version::supports_avx512bw() == false))
           ret_value = false;
         break;
-      case Op_URShiftVS:
-      case Op_RShiftVS:
-      case Op_LShiftVS:
-      case Op_MulVS:
-      case Op_AddVS:
-      case Op_SubVS:
+      case Opcodes::Op_URShiftVS:
+      case Opcodes::Op_RShiftVS:
+      case Opcodes::Op_LShiftVS:
+      case Opcodes::Op_MulVS:
+      case Opcodes::Op_AddVS:
+      case Opcodes::Op_SubVS:
         if ((vlen == 32) && (VM_Version::supports_avx512bw() == false))
           ret_value = false;
         break;
-      case Op_CMoveVD:
+      case Opcodes::Op_CMoveVD:
         if (vlen != 4)
           ret_value = false;
         break;
@@ -1834,22 +1834,22 @@
 }
 
 // Vector ideal reg corresponding to specidied size in bytes
-const int Matcher::vector_ideal_reg(int size) {
+const Opcodes Matcher::vector_ideal_reg(int size) {
   assert(MaxVectorSize >= size, "");
   switch(size) {
-    case  4: return Op_VecS;
-    case  8: return Op_VecD;
-    case 16: return Op_VecX;
-    case 32: return Op_VecY;
-    case 64: return Op_VecZ;
+    case  4: return Opcodes::Op_VecS;
+    case  8: return Opcodes::Op_VecD;
+    case 16: return Opcodes::Op_VecX;
+    case 32: return Opcodes::Op_VecY;
+    case 64: return Opcodes::Op_VecZ;
   }
   ShouldNotReachHere();
-  return 0;
+  return Opcodes::Op_Node;
 }
 
 // Only lowest bits of xmm reg are used for vector shift count.
-const int Matcher::vector_shift_count_ideal_reg(int size) {
-  return Op_VecS;
+const Opcodes Matcher::vector_shift_count_ideal_reg(int size) {
+  return Opcodes::Op_VecS;
 }
 
 // x86 supports misaligned vectors store/load.
@@ -1868,7 +1868,7 @@
 
 // Check for shift by small constant as well
 static bool clone_shift(Node* shift, Matcher* matcher, Matcher::MStack& mstack, VectorSet& address_visited) {
-  if (shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
+  if (shift->Opcode() == Opcodes::Op_LShiftX && shift->in(2)->is_Con() &&
       shift->in(2)->get_int() <= 3 &&
       // Are there other uses besides address expressions?
       !matcher->is_visited(shift)) {
@@ -1879,7 +1879,7 @@
     // Allow Matcher to match the rule which bypass
     // ConvI2L operation for an array index on LP64
     // if the index value is positive.
-    if (conv->Opcode() == Op_ConvI2L &&
+    if (conv->Opcode() == Opcodes::Op_ConvI2L &&
         conv->as_Type()->type()->is_long()->_lo >= 0 &&
         // Are there other uses besides address expressions?
         !matcher->is_visited(conv)) {
@@ -1938,11 +1938,11 @@
 
 // Helper methods for MachSpillCopyNode::implementation().
 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
-                          int src_hi, int dst_hi, uint ireg, outputStream* st) {
+                          int src_hi, int dst_hi, Opcodes ireg, outputStream* st) {
   // In 64-bit VM size calculation is very complex. Emitting instructions
   // into scratch buffer is used to get size in 64-bit VM.
   LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
-  assert(ireg == Op_VecS || // 32bit vector
+  assert(ireg == Opcodes::Op_VecS || // 32bit vector
          (src_lo & 1) == 0 && (src_lo + 1) == src_hi &&
          (dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi,
          "no non-adjacent vector moves" );
@@ -1950,15 +1950,15 @@
     MacroAssembler _masm(cbuf);
     int offset = __ offset();
     switch (ireg) {
-    case Op_VecS: // copy whole register
-    case Op_VecD:
-    case Op_VecX:
+    case Opcodes::Op_VecS: // copy whole register
+    case Opcodes::Op_VecD:
+    case Opcodes::Op_VecX:
       __ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]));
       break;
-    case Op_VecY:
+    case Opcodes::Op_VecY:
       __ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]));
      break;
-    case Op_VecZ:
+    case Opcodes::Op_VecZ:
       __ evmovdquq(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]), 2);
       break;
     default:
@@ -1973,13 +1973,13 @@
 #ifndef PRODUCT
   } else if (!do_size) {
     switch (ireg) {
-    case Op_VecS:
-    case Op_VecD:
-    case Op_VecX:
+    case Opcodes::Op_VecS:
+    case Opcodes::Op_VecD:
+    case Opcodes::Op_VecX:
       st->print("movdqu  %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
       break;
-    case Op_VecY:
-    case Op_VecZ:
+    case Opcodes::Op_VecY:
+    case Opcodes::Op_VecZ:
       st->print("vmovdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
       break;
     default:
@@ -1992,7 +1992,7 @@
 }
 
 static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
-                     int stack_offset, int reg, uint ireg, outputStream* st) {
+                     int stack_offset, int reg, Opcodes ireg, outputStream* st) {
   // In 64-bit VM size calculation is very complex. Emitting instructions
   // into scratch buffer is used to get size in 64-bit VM.
   LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
@@ -2001,19 +2001,19 @@
     int offset = __ offset();
     if (is_load) {
       switch (ireg) {
-      case Op_VecS:
+      case Opcodes::Op_VecS:
         __ movdl(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
         break;
-      case Op_VecD:
+      case Opcodes::Op_VecD:
         __ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
         break;
-      case Op_VecX:
+      case Opcodes::Op_VecX:
         __ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
         break;
-      case Op_VecY:
+      case Opcodes::Op_VecY:
         __ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
         break;
-      case Op_VecZ:
+      case Opcodes::Op_VecZ:
         __ evmovdquq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset), 2);
         break;
       default:
@@ -2021,19 +2021,19 @@
       }
     } else { // store
       switch (ireg) {
-      case Op_VecS:
+      case Opcodes::Op_VecS:
         __ movdl(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
         break;
-      case Op_VecD:
+      case Opcodes::Op_VecD:
         __ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
         break;
-      case Op_VecX:
+      case Opcodes::Op_VecX:
         __ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
         break;
-      case Op_VecY:
+      case Opcodes::Op_VecY:
         __ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
         break;
-      case Op_VecZ:
+      case Opcodes::Op_VecZ:
         __ evmovdquq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]), 2);
         break;
       default:
@@ -2051,17 +2051,17 @@
   } else if (!do_size) {
     if (is_load) {
       switch (ireg) {
-      case Op_VecS:
+      case Opcodes::Op_VecS:
         st->print("movd    %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
         break;
-      case Op_VecD:
+      case Opcodes::Op_VecD:
         st->print("movq    %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
         break;
-      case Op_VecX:
+      case Opcodes::Op_VecX:
         st->print("movdqu  %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
         break;
-      case Op_VecY:
-      case Op_VecZ:
+      case Opcodes::Op_VecY:
+      case Opcodes::Op_VecZ:
         st->print("vmovdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
         break;
       default:
@@ -2069,17 +2069,17 @@
       }
     } else { // store
       switch (ireg) {
-      case Op_VecS:
+      case Opcodes::Op_VecS:
         st->print("movd    [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
         break;
-      case Op_VecD:
+      case Opcodes::Op_VecD:
         st->print("movq    [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
         break;
-      case Op_VecX:
+      case Opcodes::Op_VecX:
         st->print("movdqu  [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
         break;
-      case Op_VecY:
-      case Op_VecZ:
+      case Opcodes::Op_VecY:
+      case Opcodes::Op_VecZ:
         st->print("vmovdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
         break;
       default:
@@ -2094,19 +2094,19 @@
     int tuple_type = Assembler::EVEX_FVM;
     int input_size = Assembler::EVEX_32bit;
     switch (ireg) {
-    case Op_VecS:
+    case Opcodes::Op_VecS:
       tuple_type = Assembler::EVEX_T1S;
       break;
-    case Op_VecD:
+    case Opcodes::Op_VecD:
       tuple_type = Assembler::EVEX_T1S;
       input_size = Assembler::EVEX_64bit;
      break;
-    case Op_VecX:
+    case Opcodes::Op_VecX:
       break;
-    case Op_VecY:
+    case Opcodes::Op_VecY:
       vec_len = 1;
       break;
-    case Op_VecZ:
+    case Opcodes::Op_VecZ:
       vec_len = 2;
       break;
     }
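
For reviewers who want to see what this conversion buys: the patch replaces plain int/uint opcode and ideal-register values with a scoped enumeration named Opcodes. The hunks above do not show the definition of Opcodes itself (in HotSpot it would live in the generated opcodes.hpp), so the following is only a minimal, self-contained sketch under that assumption; the enumerator names are taken from the hunks, everything else is illustrative.

// Sketch only: NOT the real opcodes.hpp. Illustrates why "enum class"
// catches opcode/int mix-ups at compile time instead of at run time.
#include <cstdio>

enum class Opcodes {
  Op_Node = 0,   // typed "no opcode" value; replaces the old "return 0"
  Op_VecS,       //  32-bit vector ideal register
  Op_VecD,       //  64-bit
  Op_VecX,       // 128-bit
  Op_VecY,       // 256-bit
  Op_VecZ        // 512-bit
};

// Mirrors the shape of Matcher::vector_ideal_reg() after the change:
// the return type is now Opcodes rather than int.
static Opcodes vector_ideal_reg(int size) {
  switch (size) {
  case  4: return Opcodes::Op_VecS;
  case  8: return Opcodes::Op_VecD;
  case 16: return Opcodes::Op_VecX;
  case 32: return Opcodes::Op_VecY;
  case 64: return Opcodes::Op_VecZ;
  }
  return Opcodes::Op_Node;   // was "return 0" when the type was int
}

int main() {
  Opcodes ireg = vector_ideal_reg(16);
  // int r = ireg;            // no longer compiles: no implicit conversion
  // if (ireg == 3) { ... }   // no longer compiles: must name an enumerator
  if (ireg == Opcodes::Op_VecX)
    std::printf("picked the 128-bit ideal register (enumerator %d)\n",
                static_cast<int>(ireg));
  return 0;
}

With the old "uint ireg" signatures in vec_mov_helper() and vec_spill_helper(), accidentally passing a stack offset or register encoding where the ideal-register kind was expected compiled silently; under the enum class signatures it is a type error, which appears to be the point of the webrev.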