--- old/src/share/vm/opto/matcher.cpp	2016-07-11 22:46:38.935403918 +0900
+++ new/src/share/vm/opto/matcher.cpp	2016-07-11 22:46:38.794404412 +0900
@@ -43,7 +43,7 @@
 OptoReg::Name OptoReg::c_frame_pointer;
 
-const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
+const RegMask *Matcher::idealreg2regmask[static_cast<uint>(Opcodes::_last_machine_leaf)];
 RegMask Matcher::mreg2regmask[_last_Mach_Reg];
 RegMask Matcher::STACK_ONLY_mask;
 RegMask Matcher::c_frame_ptr_mask;
@@ -74,41 +74,41 @@
   _dontcare(&_states_arena) {
   C->set_matcher(this);
 
-  idealreg2spillmask  [Op_RegI] = NULL;
-  idealreg2spillmask  [Op_RegN] = NULL;
-  idealreg2spillmask  [Op_RegL] = NULL;
-  idealreg2spillmask  [Op_RegF] = NULL;
-  idealreg2spillmask  [Op_RegD] = NULL;
-  idealreg2spillmask  [Op_RegP] = NULL;
-  idealreg2spillmask  [Op_VecS] = NULL;
-  idealreg2spillmask  [Op_VecD] = NULL;
-  idealreg2spillmask  [Op_VecX] = NULL;
-  idealreg2spillmask  [Op_VecY] = NULL;
-  idealreg2spillmask  [Op_VecZ] = NULL;
-
-  idealreg2debugmask  [Op_RegI] = NULL;
-  idealreg2debugmask  [Op_RegN] = NULL;
-  idealreg2debugmask  [Op_RegL] = NULL;
-  idealreg2debugmask  [Op_RegF] = NULL;
-  idealreg2debugmask  [Op_RegD] = NULL;
-  idealreg2debugmask  [Op_RegP] = NULL;
-  idealreg2debugmask  [Op_VecS] = NULL;
-  idealreg2debugmask  [Op_VecD] = NULL;
-  idealreg2debugmask  [Op_VecX] = NULL;
-  idealreg2debugmask  [Op_VecY] = NULL;
-  idealreg2debugmask  [Op_VecZ] = NULL;
-
-  idealreg2mhdebugmask[Op_RegI] = NULL;
-  idealreg2mhdebugmask[Op_RegN] = NULL;
-  idealreg2mhdebugmask[Op_RegL] = NULL;
-  idealreg2mhdebugmask[Op_RegF] = NULL;
-  idealreg2mhdebugmask[Op_RegD] = NULL;
-  idealreg2mhdebugmask[Op_RegP] = NULL;
-  idealreg2mhdebugmask[Op_VecS] = NULL;
-  idealreg2mhdebugmask[Op_VecD] = NULL;
-  idealreg2mhdebugmask[Op_VecX] = NULL;
-  idealreg2mhdebugmask[Op_VecY] = NULL;
-  idealreg2mhdebugmask[Op_VecZ] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegI)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegN)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegL)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegF)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegD)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegP)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecS)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecD)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecX)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecY)] = NULL;
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecZ)] = NULL;
+
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecS)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecD)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecX)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecY)] = NULL;
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_VecZ)] = NULL;
+
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecS)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecD)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecX)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecY)] = NULL;
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_VecZ)] = NULL;
 
   debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
 }
@@ -185,9 +185,9 @@
   const TypeTuple *range = C->tf()->range();
   if( range->cnt() > TypeFunc::Parms ) { // If not a void function
     // Get ideal-register return type
-    int ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
+    Opcodes ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
     // Get machine return register
-    uint sop = C->start()->Opcode();
+    Opcodes sop = C->start()->Opcode();
     OptoRegPair regs = return_value(ireg, false);
 
     // And mask for same
@@ -419,32 +419,32 @@
   // Allocate storage for spill masks as masks for the appropriate load type.
   RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+5));
 
-  idealreg2spillmask  [Op_RegN] = &rms[0];
-  idealreg2spillmask  [Op_RegI] = &rms[1];
-  idealreg2spillmask  [Op_RegL] = &rms[2];
-  idealreg2spillmask  [Op_RegF] = &rms[3];
-  idealreg2spillmask  [Op_RegD] = &rms[4];
-  idealreg2spillmask  [Op_RegP] = &rms[5];
-
-  idealreg2debugmask  [Op_RegN] = &rms[6];
-  idealreg2debugmask  [Op_RegI] = &rms[7];
-  idealreg2debugmask  [Op_RegL] = &rms[8];
-  idealreg2debugmask  [Op_RegF] = &rms[9];
-  idealreg2debugmask  [Op_RegD] = &rms[10];
-  idealreg2debugmask  [Op_RegP] = &rms[11];
-
-  idealreg2mhdebugmask[Op_RegN] = &rms[12];
-  idealreg2mhdebugmask[Op_RegI] = &rms[13];
-  idealreg2mhdebugmask[Op_RegL] = &rms[14];
-  idealreg2mhdebugmask[Op_RegF] = &rms[15];
-  idealreg2mhdebugmask[Op_RegD] = &rms[16];
-  idealreg2mhdebugmask[Op_RegP] = &rms[17];
-
-  idealreg2spillmask  [Op_VecS] = &rms[18];
-  idealreg2spillmask  [Op_VecD] = &rms[19];
-  idealreg2spillmask  [Op_VecX] = &rms[20];
-  idealreg2spillmask  [Op_VecY] = &rms[21];
-  idealreg2spillmask  [Op_VecZ] = &rms[22];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegN)] = &rms[0];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegI)] = &rms[1];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegL)] = &rms[2];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegF)] = &rms[3];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegD)] = &rms[4];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_RegP)] = &rms[5];
+
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)] = &rms[6];
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)] = &rms[7];
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)] = &rms[8];
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)] = &rms[9];
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)] = &rms[10];
+  idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)] = &rms[11];
+
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)] = &rms[12];
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)] = &rms[13];
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)] = &rms[14];
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)] = &rms[15];
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)] = &rms[16];
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)] = &rms[17];
+
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecS)] = &rms[18];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecD)] = &rms[19];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecX)] = &rms[20];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecY)] = &rms[21];
+  idealreg2spillmask  [static_cast<uint>(Opcodes::Op_VecZ)] = &rms[22];
 
   OptoReg::Name i;
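Note on the pattern used throughout this hunk: a C++11 scoped enum does not convert implicitly to an array index, so every use of an ideal-register opcode as a subscript needs an explicit integral cast. A minimal, self-contained sketch of the idiom, with a hypothetical idx() helper and placeholder enumerators (not the real C2 opcode list, and the helper is not part of this patch):

#include <cstdint>

// Illustrative only: placeholder enumerators standing in for the real Opcodes list.
enum class Opcodes : uint32_t { Op_Node = 0, Op_RegI, Op_RegL, _last_machine_leaf };

// Hypothetical helper; the patch itself writes static_cast<uint>(...) at each use site.
constexpr uint32_t idx(Opcodes op) { return static_cast<uint32_t>(op); }

int main() {
  // The enum's last member sizes the table, exactly like idealreg2regmask[] above.
  const char* names[idx(Opcodes::_last_machine_leaf)] = { "Node", "RegI", "RegL" };
  return names[idx(Opcodes::Op_RegI)][0] == 'R' ? 0 : 1;   // index through the cast
}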
@@ -472,32 +472,32 @@
   aligned_stack_mask.clear_to_pairs();
   assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
 
-  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
+  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegP)];
 #ifdef _LP64
-  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
-   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
-   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
+  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegN)];
+   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)]->OR(C->FIRST_STACK_mask());
+   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(aligned_stack_mask);
 #else
-   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
+   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(C->FIRST_STACK_mask());
 #endif
-  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
-   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
-  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
-   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
-  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
-   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
-  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
-   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
+  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)];
+   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)]->OR(C->FIRST_STACK_mask());
+  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)];
+   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)]->OR(aligned_stack_mask);
+  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)];
+   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)]->OR(C->FIRST_STACK_mask());
+  *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)];
+   idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)]->OR(aligned_stack_mask);
 
   if (Matcher::vector_size_supported(T_BYTE,4)) {
-    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
-     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
+    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecS)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecS)];
+     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecS)]->OR(C->FIRST_STACK_mask());
   }
   if (Matcher::vector_size_supported(T_FLOAT,2)) {
     // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
     // RA guarantees such alignment since it is needed for Double and Long values.
-    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
-     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
+    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecD)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecD)];
+     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecD)]->OR(aligned_stack_mask);
   }
   if (Matcher::vector_size_supported(T_FLOAT,4)) {
     // For VecX we need quadro alignment and 16 bytes (4 slots) for spills.
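The VecD/VecX/VecY/VecZ branches only admit stack slots whose groups are fully available and aligned to the vector width before OR-ing them into the spill mask (clear_to_pairs / clear_to_sets). A rough, stand-alone sketch of that rounding using std::bitset instead of RegMask; all names and sizes here are invented for the example:

#include <bitset>
#include <cstddef>

// Keep only whole groups of `slots_per_vec` consecutive slots that start on an
// aligned boundary, mirroring what clear_to_sets() does to a stack RegMask.
template <size_t N>
std::bitset<N> clear_to_sets(std::bitset<N> mask, size_t slots_per_vec) {
  std::bitset<N> out;
  for (size_t base = 0; base + slots_per_vec <= N; base += slots_per_vec) {
    bool whole_set = true;
    for (size_t k = 0; k < slots_per_vec; k++) {
      if (!mask.test(base + k)) { whole_set = false; break; }
    }
    if (whole_set) {
      for (size_t k = 0; k < slots_per_vec; k++) out.set(base + k);
    }
  }
  return out;
}

int main() {
  std::bitset<16> m;
  for (size_t i = 2; i < 10; i++) m.set(i);     // slots 2..9 available
  return clear_to_sets(m, 4).test(4) ? 0 : 1;   // only the aligned group 4..7 survives
}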
@@ -514,8 +514,8 @@
     }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
-    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
-     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
+    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecX)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecX)];
+     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecX)]->OR(aligned_stack_mask);
   }
   if (Matcher::vector_size_supported(T_FLOAT,8)) {
     // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
@@ -526,8 +526,8 @@
     }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
-    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
-     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
+    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecY)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecY)];
+     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecY)]->OR(aligned_stack_mask);
   }
   if (Matcher::vector_size_supported(T_FLOAT,16)) {
     // For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
@@ -538,28 +538,28 @@
     }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
-    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
-     idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
+    *idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecZ)] = *idealreg2regmask[static_cast<uint>(Opcodes::Op_VecZ)];
+     idealreg2spillmask[static_cast<uint>(Opcodes::Op_VecZ)]->OR(aligned_stack_mask);
   }
 
   if (UseFPUForSpilling) {
     // This mask logic assumes that the spill operations are
     // symmetric and that the registers involved are the same size.
     // On sparc for instance we may have to use 64 bit moves will
     // kill 2 registers when used with F0-F31.
-    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
-    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)]);
 #ifdef _LP64
-    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
-    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
-    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
-    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)]);
 #else
-    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)]);
 #ifdef ARM
     // ARM has support for moving 64bit values between a pair of
     // integer registers and a double register
-    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
-    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)]);
+    idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)]->OR(*idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)]);
 #endif
 #endif
   }
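Each spill mask above starts as a copy of the class's register mask and is OR-ed with the (aligned) stack mask; with UseFPUForSpilling the integer and floating-point classes are additionally OR-ed into each other so the spiller may bounce values through the other register file. A toy version with plain bitmasks; the register numbering and flag variable are invented for the sketch:

#include <cstdint>

int main() {
  // Invented encoding: bits 0-7 integer regs, bits 8-15 float regs, bits 16+ stack slots.
  uint32_t int_regs   = 0x000000FFu;
  uint32_t float_regs = 0x0000FF00u;
  uint32_t stack      = 0xFFFF0000u;

  uint32_t spill_int   = int_regs   | stack;   // like idealreg2spillmask[RegI]->OR(FIRST_STACK_mask)
  uint32_t spill_float = float_regs | stack;

  bool use_fpu_for_spilling = true;            // stand-in for the UseFPUForSpilling flag
  if (use_fpu_for_spilling) {
    spill_int   |= float_regs;                 // allow int values to spill through FPRs
    spill_float |= int_regs;                   // and vice versa
  }
  return ((spill_int & spill_float & stack) != 0) ? 0 : 1;
}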
@@ -567,19 +567,19 @@
   // Make up debug masks.  Any spill slot plus callee-save registers.
   // Caller-save registers are assumed to be trashable by the various
   // inline-cache fixup routines.
-  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
-  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
-  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
-  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
-  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
-  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];
-
-  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
-  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
-  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
-  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
-  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
-  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
+  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)];
+  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)];
+  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)];
+  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)];
+  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)];
+  *idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)];
+
+  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegN)];
+  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegI)];
+  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegL)];
+  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegF)];
+  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegD)];
+  *idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)]= *idealreg2spillmask[static_cast<uint>(Opcodes::Op_RegP)];
 
   // Prevent stub compilations from attempting to reference
   // callee-saved registers from debug info
@@ -590,31 +590,31 @@
     if( _register_save_policy[i] == 'C' ||
         _register_save_policy[i] == 'A' ||
         (_register_save_policy[i] == 'E' && exclude_soe) ) {
-      idealreg2debugmask  [Op_RegN]->Remove(i);
-      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
-      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
-      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
-      idealreg2debugmask  [Op_RegD]->Remove(i);
-      idealreg2debugmask  [Op_RegP]->Remove(i);
-
-      idealreg2mhdebugmask[Op_RegN]->Remove(i);
-      idealreg2mhdebugmask[Op_RegI]->Remove(i);
-      idealreg2mhdebugmask[Op_RegL]->Remove(i);
-      idealreg2mhdebugmask[Op_RegF]->Remove(i);
-      idealreg2mhdebugmask[Op_RegD]->Remove(i);
-      idealreg2mhdebugmask[Op_RegP]->Remove(i);
+      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegN)]->Remove(i);
+      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegI)]->Remove(i); // Exclude save-on-call
+      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegL)]->Remove(i); // registers from debug
+      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegF)]->Remove(i); // masks
+      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegD)]->Remove(i);
+      idealreg2debugmask  [static_cast<uint>(Opcodes::Op_RegP)]->Remove(i);
+
+      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)]->Remove(i);
+      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)]->Remove(i);
+      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)]->Remove(i);
+      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)]->Remove(i);
+      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)]->Remove(i);
+      idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)]->Remove(i);
     }
   }
 
   // Subtract the register we use to save the SP for MethodHandle
   // invokes to from the debug mask.
   const RegMask save_mask = method_handle_invoke_SP_save_mask();
-  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
-  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
-  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
-  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
-  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
-  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegN)]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegI)]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegL)]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegF)]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegD)]->SUBTRACT(save_mask);
+  idealreg2mhdebugmask[static_cast<uint>(Opcodes::Op_RegP)]->SUBTRACT(save_mask);
 }
 
 //---------------------------is_save_on_entry----------------------------------
@@ -682,7 +682,7 @@
   // the tail_call_rms array.
   for( i=1; i < root->req(); i++ ) {
     MachReturnNode *m = root->in(i)->as_MachReturn();
-    if( m->ideal_Opcode() == Op_TailCall ) {
+    if( m->ideal_Opcode() == Opcodes::Op_TailCall ) {
       tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
       tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
       break;
@@ -695,7 +695,7 @@
   // the tail_jump_rms array.
   for( i=1; i < root->req(); i++ ) {
     MachReturnNode *m = root->in(i)->as_MachReturn();
-    if( m->ideal_Opcode() == Op_TailJump ) {
+    if( m->ideal_Opcode() == Opcodes::Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
       break;
@@ -710,11 +710,11 @@
   for( i=1; i < root->req(); i++ ) {
     MachReturnNode *exit = root->in(i)->as_MachReturn();
     switch( exit->ideal_Opcode() ) {
-      case Op_Return   : exit->_in_rms = ret_rms;  break;
-      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
-      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
-      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
-      case Op_Halt     : exit->_in_rms = halt_rms; break;
+      case Opcodes::Op_Return   : exit->_in_rms = ret_rms;  break;
+      case Opcodes::Op_Rethrow  : exit->_in_rms = reth_rms; break;
+      case Opcodes::Op_TailCall : exit->_in_rms = tail_call_rms; break;
+      case Opcodes::Op_TailJump : exit->_in_rms = tail_jump_rms; break;
+      case Opcodes::Op_Halt     : exit->_in_rms = halt_rms; break;
       default          : ShouldNotReachHere();
     }
   }
@@ -735,15 +735,15 @@
       tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
       // Halts need the SOE registers, but only in the stack as debug info.
       // A just-prior uncommon-trap or deoptimization will use the SOE regs.
-      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];
+      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[static_cast<uint>(_register_save_type[i])];
 
       Node *mproj;
 
       // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
       // into a single RegD.
       if( (i&1) == 0 &&
-          _register_save_type[i  ] == Op_RegF &&
-          _register_save_type[i+1] == Op_RegF &&
+          _register_save_type[i  ] == Opcodes::Op_RegF &&
+          _register_save_type[i+1] == Opcodes::Op_RegF &&
           is_save_on_entry(i+1) ) {
         // Add other bit for double
         ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
@@ -751,12 +751,12 @@
         tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
         tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
         halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
-        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
+        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Opcodes::Op_RegD );
         proj_cnt += 2;          // Skip 2 for doubles
       }
       else if( (i&1) == 1 &&    // Else check for high half of double
-               _register_save_type[i-1] == Op_RegF &&
-               _register_save_type[i  ] == Op_RegF &&
+               _register_save_type[i-1] == Opcodes::Op_RegF &&
+               _register_save_type[i  ] == Opcodes::Op_RegF &&
                is_save_on_entry(i-1) ) {
         ret_rms      [      ret_edge_cnt] = RegMask::Empty;
         reth_rms     [     reth_edge_cnt] = RegMask::Empty;
@@ -768,8 +768,8 @@
       // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
       // into a single RegL.
       else if( (i&1) == 0 &&
-          _register_save_type[i  ] == Op_RegI &&
-          _register_save_type[i+1] == Op_RegI &&
+          _register_save_type[i  ] == Opcodes::Op_RegI &&
+          _register_save_type[i+1] == Opcodes::Op_RegI &&
         is_save_on_entry(i+1) ) {
         // Add other bit for long
         ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
@@ -777,12 +777,12 @@
         tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
         tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
         halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
-        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
+        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Opcodes::Op_RegL );
         proj_cnt += 2;          // Skip 2 for longs
       }
       else if( (i&1) == 1 &&    // Else check for high half of long
-               _register_save_type[i-1] == Op_RegI &&
-               _register_save_type[i  ] == Op_RegI &&
+               _register_save_type[i-1] == Opcodes::Op_RegI &&
+               _register_save_type[i  ] == Opcodes::Op_RegI &&
                is_save_on_entry(i-1) ) {
         ret_rms      [      ret_edge_cnt] = RegMask::Empty;
         reth_rms     [     reth_edge_cnt] = RegMask::Empty;
@@ -810,7 +810,7 @@
 
 //------------------------------init_spill_mask--------------------------------
 void Matcher::init_spill_mask( Node *ret ) {
-  if( idealreg2regmask[Op_RegI] ) return; // One time only init
+  if( idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)] ) return; // One time only init
 
   OptoReg::c_frame_pointer = c_frame_pointer();
   c_frame_ptr_mask = c_frame_pointer();
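init_spill_mask() runs once per VM: it matches a dummy load for each ideal register class and caches the resulting machine register mask, using the Op_RegI slot as the "already initialized" sentinel. The same lazy, sentinel-guarded table pattern in miniature; the types and the harvest function are made up for the sketch and are not the C2 API:

#include <array>
#include <cstdint>

enum class RegClass : uint32_t { IntReg = 0, LongReg, FloatReg, Count };

using Mask = uint64_t;
static std::array<const Mask*, static_cast<uint32_t>(RegClass::Count)> reg2mask{};

static const Mask* harvest_mask(RegClass rc) {
  // Stand-in for matching a LoadNode and taking its out_RegMask().
  static const Mask masks[] = { 0x00FF, 0xFF00, 0xFF0000 };
  return &masks[static_cast<uint32_t>(rc)];
}

void init_masks() {
  if (reg2mask[static_cast<uint32_t>(RegClass::IntReg)] != nullptr) return;  // one-time init
  for (uint32_t i = 0; i < static_cast<uint32_t>(RegClass::Count); i++) {
    reg2mask[i] = harvest_mask(static_cast<RegClass>(i));
  }
}

int main() {
  init_masks();
  return (*reg2mask[0] == 0x00FF) ? 0 : 1;   // table is populated exactly once
}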
@@ -856,35 +856,35 @@
          spillD != NULL && spillP != NULL, "");
   // Get the ADLC notion of the right regmask, for each basic type.
 #ifdef _LP64
-  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
+  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegN)] = &spillCP->out_RegMask();
 #endif
-  idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
-  idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
-  idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
-  idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
-  idealreg2regmask[Op_RegP] = &spillP->out_RegMask();
+  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegI)] = &spillI->out_RegMask();
+  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegL)] = &spillL->out_RegMask();
+  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegF)] = &spillF->out_RegMask();
+  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegD)] = &spillD->out_RegMask();
+  idealreg2regmask[static_cast<uint>(Opcodes::Op_RegP)] = &spillP->out_RegMask();
 
   // Vector regmasks.
   if (Matcher::vector_size_supported(T_BYTE,4)) {
     TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
     MachNode *spillVectS = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
-    idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask();
+    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecS)] = &spillVectS->out_RegMask();
   }
   if (Matcher::vector_size_supported(T_FLOAT,2)) {
     MachNode *spillVectD = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
-    idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask();
+    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecD)] = &spillVectD->out_RegMask();
   }
   if (Matcher::vector_size_supported(T_FLOAT,4)) {
     MachNode *spillVectX = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
-    idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask();
+    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecX)] = &spillVectX->out_RegMask();
   }
   if (Matcher::vector_size_supported(T_FLOAT,8)) {
     MachNode *spillVectY = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
-    idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();
+    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecY)] = &spillVectY->out_RegMask();
   }
   if (Matcher::vector_size_supported(T_FLOAT,16)) {
     MachNode *spillVectZ = match_tree(new LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTZ));
-    idealreg2regmask[Op_VecZ] = &spillVectZ->out_RegMask();
+    idealreg2regmask[static_cast<uint>(Opcodes::Op_VecZ)] = &spillVectZ->out_RegMask();
   }
 }
 
@@ -909,7 +909,7 @@
   // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
   if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
     switch (n->Opcode()) {
-    case Op_PrefetchAllocation:
+    case Opcodes::Op_PrefetchAllocation:
       nidx = Compile::AliasIdxRaw;
       nat = TypeRawPtr::BOTTOM;
       break;
@@ -917,7 +917,7 @@
   }
   if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
     switch (n->Opcode()) {
-    case Op_ClearArray:
+    case Opcodes::Op_ClearArray:
       midx = Compile::AliasIdxRaw;
       mat = TypeRawPtr::BOTTOM;
       break;
@@ -925,11 +925,11 @@
   }
   if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
     switch (n->Opcode()) {
-    case Op_Return:
-    case Op_Rethrow:
-    case Op_Halt:
-    case Op_TailCall:
-    case Op_TailJump:
+    case Opcodes::Op_Return:
+    case Opcodes::Op_Rethrow:
+    case Opcodes::Op_Halt:
+    case Opcodes::Op_TailCall:
+    case Opcodes::Op_TailJump:
       nidx = Compile::AliasIdxBot;
       nat = TypePtr::BOTTOM;
       break;
@@ -937,18 +937,18 @@
   }
   if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
     switch (n->Opcode()) {
-    case Op_StrComp:
-    case Op_StrEquals:
-    case Op_StrIndexOf:
-    case Op_StrIndexOfChar:
-    case Op_AryEq:
-    case Op_HasNegatives:
-    case Op_MemBarVolatile:
-    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
-    case Op_StrInflatedCopy:
-    case Op_StrCompressedCopy:
-    case Op_OnSpinWait:
-    case Op_EncodeISOArray:
+    case Opcodes::Op_StrComp:
+    case Opcodes::Op_StrEquals:
+    case Opcodes::Op_StrIndexOf:
+    case Opcodes::Op_StrIndexOfChar:
+    case Opcodes::Op_AryEq:
+    case Opcodes::Op_HasNegatives:
+    case Opcodes::Op_MemBarVolatile:
+    case Opcodes::Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
+    case Opcodes::Op_StrInflatedCopy:
+    case Opcodes::Op_StrCompressedCopy:
+    case Opcodes::Op_OnSpinWait:
+    case Opcodes::Op_EncodeISOArray:
       nidx = Compile::AliasIdxTop;
       nat = NULL;
       break;
@@ -1060,10 +1060,10 @@
     // Monitor boxes are also represented directly.
     for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
       Node *m = n->in(i);          // Get input
-      int op = m->Opcode();
-      assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
-      if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
-          op == Op_ConF || op == Op_ConD || op == Op_ConL
+      Opcodes op = m->Opcode();
+      assert((op == Opcodes::Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
+      if( op == Opcodes::Op_ConI || op == Opcodes::Op_ConP || op == Opcodes::Op_ConN || op == Opcodes::Op_ConNKlass ||
+          op == Opcodes::Op_ConF || op == Opcodes::Op_ConD || op == Opcodes::Op_ConL
           // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
           ) {
         m = m->clone();
@@ -1298,7 +1298,7 @@
   // is excluded on the max-per-method basis, debug info cannot land in
   // this killed area.
   uint r_cnt = mcall->tf()->range()->cnt();
-  MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
+  MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, static_cast<Opcodes>(MachProjNode::projType::fat_proj) );
   if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
     C->record_method_not_compilable("unsupported outgoing calling sequence");
   } else {
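With the scoped enum, the chain of constant-opcode comparisons in the debug-input loop above stays verbose. A small predicate of the following shape could keep the call site readable while testing exactly the same set of opcodes; this helper is hypothetical and not part of the patch, and the enumerators are placeholders for the real list:

// Illustrative helper over a placeholder Opcodes enum.
enum class Opcodes { Op_ConI, Op_ConP, Op_ConN, Op_ConNKlass, Op_ConF, Op_ConD, Op_ConL, Op_BoxLock };

static bool is_embeddable_con(Opcodes op) {
  switch (op) {
    case Opcodes::Op_ConI: case Opcodes::Op_ConP: case Opcodes::Op_ConN:
    case Opcodes::Op_ConNKlass: case Opcodes::Op_ConF: case Opcodes::Op_ConD:
    case Opcodes::Op_ConL:
      return true;
    default:
      return false;
  }
}

int main() {
  return (is_embeddable_con(Opcodes::Op_ConI) && !is_embeddable_con(Opcodes::Op_BoxLock)) ? 0 : 1;
}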
@@ -1346,7 +1346,7 @@
 // making GotoNodes while building the CFG and in init_spill_mask() to identify
 // a Load's result RegMask for memoization in idealreg2regmask[]
 MachNode *Matcher::match_tree( const Node *n ) {
-  assert( n->Opcode() != Op_Phi, "cannot match" );
+  assert( n->Opcode() != Opcodes::Op_Phi, "cannot match" );
   assert( !n->is_block_start(), "cannot match" );
   // Set the mark for all locally allocated State objects.
   // When this call returns, the _states_arena arena will be reset
@@ -1925,9 +1925,9 @@
 class FusedPatternMatcher {
   Node* _op1_node;
   Node* _mop_node;
-  int _con_op;
+  Opcodes _con_op;
 
-  static int match_next(Node* n, int next_op, int next_op_idx) {
+  static int match_next(Node* n, Opcodes next_op, int next_op_idx) {
     if (n->in(1) == NULL || n->in(2) == NULL) {
       return -1;
     }
@@ -1947,11 +1947,11 @@
     return -1;
   }
  public:
-  FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) :
+  FusedPatternMatcher(Node* op1_node, Node *mop_node, Opcodes con_op) :
     _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }
 
-  bool match(int op1, int op1_op2_idx,  // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
-             int op2, int op2_con_idx,  // op2 and the index of the op2->con edge, -1 if op2 is commutative
+  bool match(Opcodes op1, int op1_op2_idx,  // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
+             Opcodes op2, int op2_con_idx,  // op2 and the index of the op2->con edge, -1 if op2 is commutative
              typename ConType::NativeType con_value) {
     if (_op1_node->Opcode() != op1) {
       return false;
@@ -1995,16 +1995,16 @@
 
 bool Matcher::is_bmi_pattern(Node *n, Node *m) {
   if (n != NULL && m != NULL) {
-    if (m->Opcode() == Op_LoadI) {
-      FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI);
-      return bmii.match(Op_AndI, -1, Op_SubI,  1,  0)  ||
-             bmii.match(Op_AndI, -1, Op_AddI, -1, -1)  ||
-             bmii.match(Op_XorI, -1, Op_AddI, -1, -1);
-    } else if (m->Opcode() == Op_LoadL) {
-      FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL);
-      return bmil.match(Op_AndL, -1, Op_SubL,  1,  0) ||
-             bmil.match(Op_AndL, -1, Op_AddL, -1, -1) ||
-             bmil.match(Op_XorL, -1, Op_AddL, -1, -1);
+    if (m->Opcode() == Opcodes::Op_LoadI) {
+      FusedPatternMatcher<TypeInt> bmii(n, m, Opcodes::Op_ConI);
+      return bmii.match(Opcodes::Op_AndI, -1, Opcodes::Op_SubI,  1,  0)  ||
+             bmii.match(Opcodes::Op_AndI, -1, Opcodes::Op_AddI, -1, -1)  ||
+             bmii.match(Opcodes::Op_XorI, -1, Opcodes::Op_AddI, -1, -1);
+    } else if (m->Opcode() == Opcodes::Op_LoadL) {
+      FusedPatternMatcher<TypeLong> bmil(n, m, Opcodes::Op_ConL);
+      return bmil.match(Opcodes::Op_AndL, -1, Opcodes::Op_SubL,  1,  0) ||
+             bmil.match(Opcodes::Op_AndL, -1, Opcodes::Op_AddL, -1, -1) ||
+             bmil.match(Opcodes::Op_XorL, -1, Opcodes::Op_AddL, -1, -1);
     }
   }
   return false;
@@ -2039,7 +2039,7 @@
   while (mstack.is_nonempty()) {
     n = mstack.node();       // Leave node on stack
     Node_State nstate = mstack.state();
-    uint nop = n->Opcode();
+    Opcodes nop = n->Opcode();
     if (nstate == Pre_Visit) {
       if (address_visited.test(n->_idx)) { // Visited in address already?
         // Flag as visited and shared now.
@@ -2060,15 +2060,15 @@
       bool mem_op = false;
 
       switch( nop ) {  // Handle some opcodes special
-      case Op_Phi:             // Treat Phis as shared roots
-      case Op_Parm:
-      case Op_Proj:            // All handled specially during matching
-      case Op_SafePointScalarObject:
+      case Opcodes::Op_Phi:             // Treat Phis as shared roots
+      case Opcodes::Op_Parm:
+      case Opcodes::Op_Proj:            // All handled specially during matching
+      case Opcodes::Op_SafePointScalarObject:
         set_shared(n);
         set_dontcare(n);
         break;
-      case Op_If:
-      case Op_CountedLoopEnd:
+      case Opcodes::Op_If:
+      case Opcodes::Op_CountedLoopEnd:
         mstack.set_state(Alt_Post_Visit); // Alternative way
         // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
         // with matching cmp/branch in 1 instruction.  The Matcher needs the
@@ -2078,48 +2078,48 @@
         mstack.push(n->in(1), Visit);         // Clone the Bool
         mstack.push(n->in(0), Pre_Visit);     // Visit control input
         continue; // while (mstack.is_nonempty())
-      case Op_ConvI2D:         // These forms efficiently match with a prior
-      case Op_ConvI2F:         //   Load but not a following Store
+      case Opcodes::Op_ConvI2D:         // These forms efficiently match with a prior
+      case Opcodes::Op_ConvI2F:         //   Load but not a following Store
         if( n->in(1)->is_Load() &&        // Prior load
             n->outcnt() == 1 &&           // Not already shared
             n->unique_out()->is_Store() ) // Following store
           set_shared(n);       // Force it to be a root
         break;
-      case Op_ReverseBytesI:
-      case Op_ReverseBytesL:
+      case Opcodes::Op_ReverseBytesI:
+      case Opcodes::Op_ReverseBytesL:
        if( n->in(1)->is_Load() &&        // Prior load
            n->outcnt() == 1 )            // Not already shared
          set_shared(n);                  // Force it to be a root
        break;
-      case Op_BoxLock:         // Cant match until we get stack-regs in ADLC
-      case Op_IfFalse:
-      case Op_IfTrue:
-      case Op_MachProj:
-      case Op_MergeMem:
-      case Op_Catch:
-      case Op_CatchProj:
-      case Op_CProj:
-      case Op_JumpProj:
-      case Op_JProj:
-      case Op_NeverBranch:
+      case Opcodes::Op_BoxLock:         // Cant match until we get stack-regs in ADLC
+      case Opcodes::Op_IfFalse:
+      case Opcodes::Op_IfTrue:
+      case Opcodes::Op_MachProj:
+      case Opcodes::Op_MergeMem:
+      case Opcodes::Op_Catch:
+      case Opcodes::Op_CatchProj:
+      case Opcodes::Op_CProj:
+      case Opcodes::Op_JumpProj:
+      case Opcodes::Op_JProj:
+      case Opcodes::Op_NeverBranch:
         set_dontcare(n);
        break;
-      case Op_Jump:
+      case Opcodes::Op_Jump:
         mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
         mstack.push(n->in(0), Pre_Visit);     // Visit Control input
         continue; // while (mstack.is_nonempty())
-      case Op_StrComp:
-      case Op_StrEquals:
-      case Op_StrIndexOf:
-      case Op_StrIndexOfChar:
-      case Op_AryEq:
-      case Op_HasNegatives:
-      case Op_StrInflatedCopy:
-      case Op_StrCompressedCopy:
-      case Op_EncodeISOArray:
+      case Opcodes::Op_StrComp:
+      case Opcodes::Op_StrEquals:
+      case Opcodes::Op_StrIndexOf:
+      case Opcodes::Op_StrIndexOfChar:
+      case Opcodes::Op_AryEq:
+      case Opcodes::Op_HasNegatives:
+      case Opcodes::Op_StrInflatedCopy:
+      case Opcodes::Op_StrCompressedCopy:
+      case Opcodes::Op_EncodeISOArray:
         set_shared(n); // Force result into register (it will be anyways)
         break;
-      case Op_ConP: {  // Convert pointers above the centerline to NUL
+      case Opcodes::Op_ConP: {  // Convert pointers above the centerline to NUL
         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
         const TypePtr* tp = tn->type()->is_ptr();
         if (tp->_ptr == TypePtr::AnyNull) {
@@ -2127,7 +2127,7 @@
         }
         break;
       }
-      case Op_ConN: {  // Convert narrow pointers above the centerline to NUL
+      case Opcodes::Op_ConN: {  // Convert narrow pointers above the centerline to NUL
         TypeNode *tn = n->as_Type(); // Constants derive from type nodes
         const TypePtr* tp = tn->type()->make_ptr();
         if (tp && tp->_ptr == TypePtr::AnyNull) {
@@ -2135,11 +2135,11 @@
         }
         break;
       }
-      case Op_Binary:         // These are introduced in the Post_Visit state.
+      case Opcodes::Op_Binary:         // These are introduced in the Post_Visit state.
        ShouldNotReachHere();
        break;
-      case Op_ClearArray:
-      case Op_SafePoint:
+      case Opcodes::Op_ClearArray:
+      case Opcodes::Op_SafePoint:
        mem_op = true;
         break;
       default:
@@ -2155,14 +2155,14 @@
             set_shared(n);
        }
         // Fall into default case
-        if( !n->ideal_reg() )
+        if( n->ideal_reg() == Opcodes::Op_Node )
           set_dontcare(n);  // Unmatchable Nodes
       } // end_switch
 
       for(int i = n->req() - 1; i >= 0; --i) { // For my children
         Node *m = n->in(i); // Get ith input
         if (m == NULL) continue;  // Ignore NULLs
-        uint mop = m->Opcode();
+        Opcodes mop = m->Opcode();
 
         // Must clone all producers of flags, or we will not match correctly.
         // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
@@ -2170,12 +2170,12 @@
         // are also there, so we may match a float-branch to int-flags and
         // expect the allocator to haul the flags from the int-side to the
         // fp-side.  No can do.
-        if( _must_clone[mop] ) {
+        if( _must_clone[static_cast<uint>(mop)] ) {
           mstack.push(m, Visit);
           continue; // for(int i = ...)
         }
 
-        if( mop == Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
+        if( mop == Opcodes::Op_AddP && m->in(AddPNode::Base)->is_DecodeNarrowPtr()) {
           // Bases used in addresses must be shared but since
           // they are shared through a DecodeN they may appear
           // to have a single use so force sharing here.
@@ -2191,7 +2191,7 @@
 #endif
 
         // Clone addressing expressions as they are "free" in memory access instructions
-        if (mem_op && i == MemNode::Address && mop == Op_AddP &&
+        if (mem_op && i == MemNode::Address && mop == Opcodes::Op_AddP &&
             // When there are other uses besides address expressions
             // put it on stack and mark as shared.
             !is_visited(m)) {
@@ -2225,27 +2225,27 @@
 
       // Now hack a few special opcodes
       switch( n->Opcode() ) {       // Handle some opcodes special
-      case Op_StorePConditional:
-      case Op_StoreIConditional:
-      case Op_StoreLConditional:
-      case Op_CompareAndExchangeB:
-      case Op_CompareAndExchangeS:
-      case Op_CompareAndExchangeI:
-      case Op_CompareAndExchangeL:
-      case Op_CompareAndExchangeP:
-      case Op_CompareAndExchangeN:
-      case Op_WeakCompareAndSwapB:
-      case Op_WeakCompareAndSwapS:
-      case Op_WeakCompareAndSwapI:
-      case Op_WeakCompareAndSwapL:
-      case Op_WeakCompareAndSwapP:
-      case Op_WeakCompareAndSwapN:
-      case Op_CompareAndSwapB:
-      case Op_CompareAndSwapS:
-      case Op_CompareAndSwapI:
-      case Op_CompareAndSwapL:
-      case Op_CompareAndSwapP:
-      case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
+      case Opcodes::Op_StorePConditional:
+      case Opcodes::Op_StoreIConditional:
+      case Opcodes::Op_StoreLConditional:
+      case Opcodes::Op_CompareAndExchangeB:
+      case Opcodes::Op_CompareAndExchangeS:
+      case Opcodes::Op_CompareAndExchangeI:
+      case Opcodes::Op_CompareAndExchangeL:
+      case Opcodes::Op_CompareAndExchangeP:
+      case Opcodes::Op_CompareAndExchangeN:
+      case Opcodes::Op_WeakCompareAndSwapB:
+      case Opcodes::Op_WeakCompareAndSwapS:
+      case Opcodes::Op_WeakCompareAndSwapI:
+      case Opcodes::Op_WeakCompareAndSwapL:
+      case Opcodes::Op_WeakCompareAndSwapP:
+      case Opcodes::Op_WeakCompareAndSwapN:
+      case Opcodes::Op_CompareAndSwapB:
+      case Opcodes::Op_CompareAndSwapS:
+      case Opcodes::Op_CompareAndSwapI:
+      case Opcodes::Op_CompareAndSwapL:
+      case Opcodes::Op_CompareAndSwapP:
+      case Opcodes::Op_CompareAndSwapN: {   // Convert trinary to binary-tree
         Node *newval = n->in(MemNode::ValueIn );
         Node *oldval  = n->in(LoadStoreConditionalNode::ExpectedIn);
         Node *pair = new BinaryNode( oldval, newval );
@@ -2253,13 +2253,13 @@
         n->del_req(LoadStoreConditionalNode::ExpectedIn);
         break;
       }
-      case Op_CMoveD:              // Convert trinary to binary-tree
-      case Op_CMoveF:
-      case Op_CMoveI:
-      case Op_CMoveL:
-      case Op_CMoveN:
-      case Op_CMoveP:
-      case Op_CMoveVD: {
+      case Opcodes::Op_CMoveD:              // Convert trinary to binary-tree
+      case Opcodes::Op_CMoveF:
+      case Opcodes::Op_CMoveI:
+      case Opcodes::Op_CMoveL:
+      case Opcodes::Op_CMoveN:
+      case Opcodes::Op_CMoveP:
+      case Opcodes::Op_CMoveVD: {
        // Restructure into a binary tree for Matching.  It's possible that
         // we could move this code up next to the graph reshaping for IfNodes
         // or vice-versa, but I do not want to debug this for Ladybird.
@@ -2271,23 +2271,23 @@
         n->del_req(3);
         break;
       }
-      case Op_LoopLimit: {
+      case Opcodes::Op_LoopLimit: {
         Node *pair1 = new BinaryNode(n->in(1),n->in(2));
         n->set_req(1,pair1);
         n->set_req(2,n->in(3));
         n->del_req(3);
         break;
       }
-      case Op_StrEquals:
-      case Op_StrIndexOfChar: {
+      case Opcodes::Op_StrEquals:
+      case Opcodes::Op_StrIndexOfChar: {
         Node *pair1 = new BinaryNode(n->in(2),n->in(3));
         n->set_req(2,pair1);
         n->set_req(3,n->in(4));
         n->del_req(4);
         break;
       }
-      case Op_StrComp:
-      case Op_StrIndexOf: {
+      case Opcodes::Op_StrComp:
+      case Opcodes::Op_StrIndexOf: {
         Node *pair1 = new BinaryNode(n->in(2),n->in(3));
         n->set_req(2,pair1);
         Node *pair2 = new BinaryNode(n->in(4),n->in(5));
@@ -2296,9 +2296,9 @@
         n->del_req(4);
         break;
       }
-      case Op_StrCompressedCopy:
-      case Op_StrInflatedCopy:
-      case Op_EncodeISOArray: {
+      case Opcodes::Op_StrCompressedCopy:
+      case Opcodes::Op_StrInflatedCopy:
+      case Opcodes::Op_EncodeISOArray: {
         // Restructure into a binary tree for Matching.
         Node* pair = new BinaryNode(n->in(3), n->in(4));
         n->set_req(3, pair);
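Several of the hunks above fold a node's trailing inputs into nested Binary pairs so the matcher only ever sees two-input shapes (the CMove family, StrComp/StrEquals, and the CAS group). A minimal stand-alone illustration of that reshaping, using a toy node type instead of C2's Node/BinaryNode; it assumes a node with exactly five inputs, like StrEquals before reshaping:

#include <vector>
#include <cstddef>

struct ToyNode {
  std::vector<ToyNode*> in;                       // inputs, analogous to Node::in(i)
  explicit ToyNode(std::vector<ToyNode*> ins = {}) : in(std::move(ins)) {}
};

// Collapse inputs [2] and [3] into one pair node and shift [4] down,
// the same shape change the Op_StrEquals / Op_StrIndexOfChar hunk performs.
void make_binary_pair(ToyNode* n) {
  ToyNode* pair = new ToyNode({ n->in[2], n->in[3] });  // leaks in this sketch
  n->in[2] = pair;
  n->in[3] = n->in[4];
  n->in.pop_back();                               // analogous to n->del_req(4)
}

int main() {
  ToyNode a, b, c, d, ctl;
  ToyNode str({ &ctl, &a, &b, &c, &d });          // five inputs before reshaping
  make_binary_pair(&str);
  return (str.in.size() == 4 && str.in[2]->in.size() == 2) ? 0 : 1;
}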
@@ -2329,19 +2329,19 @@
 // value being tested.
 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
   Node *iff = proj->in(0);
-  if( iff->Opcode() == Op_If ) {
+  if( iff->Opcode() == Opcodes::Op_If ) {
     // During matching If's have Bool & Cmp side-by-side
     BoolNode *b = iff->in(1)->as_Bool();
     Node *cmp = iff->in(2);
-    int opc = cmp->Opcode();
-    if (opc != Op_CmpP && opc != Op_CmpN) return;
+    Opcodes opc = cmp->Opcode();
+    if (opc != Opcodes::Op_CmpP && opc != Opcodes::Op_CmpN) return;
 
     const Type* ct = cmp->in(2)->bottom_type();
     if (ct == TypePtr::NULL_PTR ||
-        (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
+        (opc == Opcodes::Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
 
       bool push_it = false;
-      if( proj->Opcode() == Op_IfTrue ) {
+      if( proj->Opcode() == Opcodes::Op_IfTrue ) {
 #ifndef PRODUCT
         extern int all_null_checks_found;
         all_null_checks_found++;
@@ -2350,7 +2350,7 @@
           push_it = true;
         }
       } else {
-        assert( proj->Opcode() == Op_IfFalse, "" );
+        assert( proj->Opcode() == Opcodes::Op_IfFalse, "" );
         if( b->_test._test == BoolTest::eq ) {
           push_it = true;
         }
@@ -2431,7 +2431,7 @@
 bool Matcher::post_store_load_barrier(const Node* vmb) {
   Compile* C = Compile::current();
   assert(vmb->is_MemBar(), "");
-  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
+  assert(vmb->Opcode() != Opcodes::Op_MemBarAcquire && vmb->Opcode() != Opcodes::Op_LoadFence, "");
   const MemBarNode* membar = vmb->as_MemBar();
 
   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
@@ -2449,7 +2449,7 @@
 
   for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
     Node *x = ctrl->fast_out(j);
-    int xop = x->Opcode();
+    Opcodes xop = x->Opcode();
 
     // We don't need current barrier if we see another or a lock
     // before seeing volatile load.
    //
@@ -2458,39 +2458,39 @@
     // With the advent of 1-0 lock operations we're no longer guaranteed
     // that a monitor exit operation contains a serializing instruction.
 
-    if (xop == Op_MemBarVolatile ||
-        xop == Op_CompareAndExchangeB ||
-        xop == Op_CompareAndExchangeS ||
-        xop == Op_CompareAndExchangeI ||
-        xop == Op_CompareAndExchangeL ||
-        xop == Op_CompareAndExchangeP ||
-        xop == Op_CompareAndExchangeN ||
-        xop == Op_WeakCompareAndSwapB ||
-        xop == Op_WeakCompareAndSwapS ||
-        xop == Op_WeakCompareAndSwapL ||
-        xop == Op_WeakCompareAndSwapP ||
-        xop == Op_WeakCompareAndSwapN ||
-        xop == Op_WeakCompareAndSwapI ||
-        xop == Op_CompareAndSwapB ||
-        xop == Op_CompareAndSwapS ||
-        xop == Op_CompareAndSwapL ||
-        xop == Op_CompareAndSwapP ||
-        xop == Op_CompareAndSwapN ||
-        xop == Op_CompareAndSwapI) {
+    if (xop == Opcodes::Op_MemBarVolatile ||
+        xop == Opcodes::Op_CompareAndExchangeB ||
+        xop == Opcodes::Op_CompareAndExchangeS ||
+        xop == Opcodes::Op_CompareAndExchangeI ||
+        xop == Opcodes::Op_CompareAndExchangeL ||
+        xop == Opcodes::Op_CompareAndExchangeP ||
+        xop == Opcodes::Op_CompareAndExchangeN ||
+        xop == Opcodes::Op_WeakCompareAndSwapB ||
+        xop == Opcodes::Op_WeakCompareAndSwapS ||
+        xop == Opcodes::Op_WeakCompareAndSwapL ||
+        xop == Opcodes::Op_WeakCompareAndSwapP ||
+        xop == Opcodes::Op_WeakCompareAndSwapN ||
+        xop == Opcodes::Op_WeakCompareAndSwapI ||
+        xop == Opcodes::Op_CompareAndSwapB ||
+        xop == Opcodes::Op_CompareAndSwapS ||
+        xop == Opcodes::Op_CompareAndSwapL ||
+        xop == Opcodes::Op_CompareAndSwapP ||
+        xop == Opcodes::Op_CompareAndSwapN ||
+        xop == Opcodes::Op_CompareAndSwapI) {
       return true;
     }
 
     // Op_FastLock previously appeared in the Op_* list above.
     // With biased locking we're no longer guaranteed that a monitor
     // enter operation contains a serializing instruction.
-    if ((xop == Op_FastLock) && !UseBiasedLocking) {
+    if ((xop == Opcodes::Op_FastLock) && !UseBiasedLocking) {
       return true;
     }
 
     if (x->is_MemBar()) {
       // We must retain this membar if there is an upcoming volatile
       // load, which will be followed by acquire membar.
-      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
+      if (xop == Opcodes::Op_MemBarAcquire || xop == Opcodes::Op_LoadFence) {
         return false;
       } else {
         // For other kinds of barriers, check by pretending we