1 // 2 // Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 // 5 // This code is free software; you can redistribute it and/or modify it 6 // under the terms of the GNU General Public License version 2 only, as 7 // published by the Free Software Foundation. 8 // 9 // This code is distributed in the hope that it will be useful, but WITHOUT 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 // version 2 for more details (a copy is included in the LICENSE file that 13 // accompanied this code). 14 // 15 // You should have received a copy of the GNU General Public License version 16 // 2 along with this work; if not, write to the Free Software Foundation, 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 // 19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 // or visit www.oracle.com if you need additional information or have any 21 // questions. 22 // 23 // 24 25 // SPARC Architecture Description File 26 27 //----------REGISTER DEFINITION BLOCK------------------------------------------ 28 // This information is used by the matcher and the register allocator to 29 // describe individual registers and classes of registers within the target 30 // archtecture. 31 register %{ 32 //----------Architecture Description Register Definitions---------------------- 33 // General Registers 34 // "reg_def" name ( register save type, C convention save type, 35 // ideal register type, encoding, vm name ); 36 // Register Save Types: 37 // 38 // NS = No-Save: The register allocator assumes that these registers 39 // can be used without saving upon entry to the method, & 40 // that they do not need to be saved at call sites. 41 // 42 // SOC = Save-On-Call: The register allocator assumes that these registers 43 // can be used without saving upon entry to the method, 44 // but that they must be saved at call sites. 45 // 46 // SOE = Save-On-Entry: The register allocator assumes that these registers 47 // must be saved before using them upon entry to the 48 // method, but they do not need to be saved at call 49 // sites. 50 // 51 // AS = Always-Save: The register allocator assumes that these registers 52 // must be saved before using them upon entry to the 53 // method, & that they must be saved at call sites. 54 // 55 // Ideal Register Type is used to determine how to save & restore a 56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get 57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. 58 // 59 // The encoding number is the actual bit-pattern placed into the opcodes. 60 61 62 // ---------------------------- 63 // Integer/Long Registers 64 // ---------------------------- 65 66 // Need to expose the hi/lo aspect of 64-bit registers 67 // This register set is used for both the 64-bit build and 68 // the 32-bit build with 1-register longs. 69 70 // Global Registers 0-7 71 reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next()); 72 reg_def R_G0 ( NS, NS, Op_RegI, 0, G0->as_VMReg()); 73 reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next()); 74 reg_def R_G1 (SOC, SOC, Op_RegI, 1, G1->as_VMReg()); 75 reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next()); 76 reg_def R_G2 ( NS, NS, Op_RegI, 2, G2->as_VMReg()); 77 reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next()); 78 reg_def R_G3 (SOC, SOC, Op_RegI, 3, G3->as_VMReg()); 79 reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next()); 80 reg_def R_G4 (SOC, SOC, Op_RegI, 4, G4->as_VMReg()); 81 reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next()); 82 reg_def R_G5 (SOC, SOC, Op_RegI, 5, G5->as_VMReg()); 83 reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next()); 84 reg_def R_G6 ( NS, NS, Op_RegI, 6, G6->as_VMReg()); 85 reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next()); 86 reg_def R_G7 ( NS, NS, Op_RegI, 7, G7->as_VMReg()); 87 88 // Output Registers 0-7 89 reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next()); 90 reg_def R_O0 (SOC, SOC, Op_RegI, 8, O0->as_VMReg()); 91 reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next()); 92 reg_def R_O1 (SOC, SOC, Op_RegI, 9, O1->as_VMReg()); 93 reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next()); 94 reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg()); 95 reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next()); 96 reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg()); 97 reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next()); 98 reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg()); 99 reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next()); 100 reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg()); 101 reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next()); 102 reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg()); 103 reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next()); 104 reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg()); 105 106 // Local Registers 0-7 107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next()); 108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg()); 109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next()); 110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg()); 111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next()); 112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg()); 113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next()); 114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg()); 115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next()); 116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg()); 117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next()); 118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg()); 119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next()); 120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg()); 121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next()); 122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg()); 123 124 // Input Registers 0-7 125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next()); 126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg()); 127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next()); 128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg()); 129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next()); 130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg()); 131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next()); 132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg()); 133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next()); 134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg()); 135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next()); 136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg()); 137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next()); 138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg()); 139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next()); 140 reg_def R_I7 ( NS, NS, Op_RegI, 31, I7->as_VMReg()); 141 142 // ---------------------------- 143 // Float/Double Registers 144 // ---------------------------- 145 146 // Float Registers 147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()); 148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()); 149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()); 150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()); 151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()); 152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()); 153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()); 154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()); 155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()); 156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()); 157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg()); 158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg()); 159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg()); 160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg()); 161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg()); 162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg()); 163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg()); 164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg()); 165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg()); 166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg()); 167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg()); 168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg()); 169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg()); 170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg()); 171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg()); 172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg()); 173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg()); 174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg()); 175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg()); 176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg()); 177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg()); 178 reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg()); 179 180 // Double Registers 181 // The rules of ADL require that double registers be defined in pairs. 182 // Each pair must be two 32-bit values, but not necessarily a pair of 183 // single float registers. In each pair, ADLC-assigned register numbers 184 // must be adjacent, with the lower number even. Finally, when the 185 // CPU stores such a register pair to memory, the word associated with 186 // the lower ADLC-assigned number must be stored to the lower address. 187 188 // These definitions specify the actual bit encodings of the sparc 189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp 190 // wants 0-63, so we have to convert every time we want to use fp regs 191 // with the macroassembler, using reg_to_DoubleFloatRegister_object(). 192 // 255 is a flag meaning "don't go here". 193 // I believe we can't handle callee-save doubles D32 and up until 194 // the place in the sparc stack crawler that asserts on the 255 is 195 // fixed up. 196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg()); 197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next()); 198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg()); 199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next()); 200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg()); 201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next()); 202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg()); 203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next()); 204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg()); 205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next()); 206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg()); 207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next()); 208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg()); 209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next()); 210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg()); 211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next()); 212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg()); 213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next()); 214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg()); 215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next()); 216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg()); 217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next()); 218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg()); 219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next()); 220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg()); 221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next()); 222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg()); 223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next()); 224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg()); 225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next()); 226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg()); 227 reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next()); 228 229 230 // ---------------------------- 231 // Special Registers 232 // Condition Codes Flag Registers 233 // I tried to break out ICC and XCC but it's not very pretty. 234 // Every Sparc instruction which defs/kills one also kills the other. 235 // Hence every compare instruction which defs one kind of flags ends 236 // up needing a kill of the other. 237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 238 239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad()); 241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad()); 242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad()); 243 244 // ---------------------------- 245 // Specify the enum values for the registers. These enums are only used by the 246 // OptoReg "class". We can convert these enum values at will to VMReg when needed 247 // for visibility to the rest of the vm. The order of this enum influences the 248 // register allocator so having the freedom to set this order and not be stuck 249 // with the order that is natural for the rest of the vm is worth it. 250 alloc_class chunk0( 251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H, 252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H, 253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H, 254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H); 255 256 // Note that a register is not allocatable unless it is also mentioned 257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg. 258 259 alloc_class chunk1( 260 // The first registers listed here are those most likely to be used 261 // as temporaries. We move F0..F7 away from the front of the list, 262 // to reduce the likelihood of interferences with parameters and 263 // return values. Likewise, we avoid using F0/F1 for parameters, 264 // since they are used for return values. 265 // This FPU fine-tuning is worth about 1% on the SPEC geomean. 266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23, 268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31, 269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values 270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x, 271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x, 273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x); 274 275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3); 276 277 //----------Architecture Description Register Classes-------------------------- 278 // Several register classes are automatically defined based upon information in 279 // this architecture description. 280 // 1) reg_class inline_cache_reg ( as defined in frame section ) 281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section ) 282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) 283 // 284 285 // G0 is not included in integer class since it has special meaning. 286 reg_class g0_reg(R_G0); 287 288 // ---------------------------- 289 // Integer Register Classes 290 // ---------------------------- 291 // Exclusions from i_reg: 292 // R_G0: hardwired zero 293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java) 294 // R_G6: reserved by Solaris ABI to tools 295 // R_G7: reserved by Solaris ABI to libthread 296 // R_O7: Used as a temp in many encodings 297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 298 299 // Class for all integer registers, except the G registers. This is used for 300 // encodings which use G registers as temps. The regular inputs to such 301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator 302 // will not put an input into a temp register. 303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 304 305 reg_class g1_regI(R_G1); 306 reg_class g3_regI(R_G3); 307 reg_class g4_regI(R_G4); 308 reg_class o0_regI(R_O0); 309 reg_class o7_regI(R_O7); 310 311 // ---------------------------- 312 // Pointer Register Classes 313 // ---------------------------- 314 #ifdef _LP64 315 // 64-bit build means 64-bit pointers means hi/lo pairs 316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 320 // Lock encodings use G3 and G4 internally 321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5, 322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 325 // Special class for storeP instructions, which can store SP or RPC to TLS. 326 // It is also used for memory addressing, allowing direct TLS addressing. 327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP, 329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP ); 331 // R_L7 is the lowest-priority callee-save (i.e., NS) register 332 // We use it to save R_G2 across calls out of Java. 333 reg_class l7_regP(R_L7H,R_L7); 334 335 // Other special pointer regs 336 reg_class g1_regP(R_G1H,R_G1); 337 reg_class g2_regP(R_G2H,R_G2); 338 reg_class g3_regP(R_G3H,R_G3); 339 reg_class g4_regP(R_G4H,R_G4); 340 reg_class g5_regP(R_G5H,R_G5); 341 reg_class i0_regP(R_I0H,R_I0); 342 reg_class o0_regP(R_O0H,R_O0); 343 reg_class o1_regP(R_O1H,R_O1); 344 reg_class o2_regP(R_O2H,R_O2); 345 reg_class o7_regP(R_O7H,R_O7); 346 347 #else // _LP64 348 // 32-bit build means 32-bit pointers means 1 register. 349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5, 350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 353 // Lock encodings use G3 and G4 internally 354 reg_class lock_ptr_reg(R_G1, R_G5, 355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 358 // Special class for storeP instructions, which can store SP or RPC to TLS. 359 // It is also used for memory addressing, allowing direct TLS addressing. 360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5, 361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP, 362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP); 364 // R_L7 is the lowest-priority callee-save (i.e., NS) register 365 // We use it to save R_G2 across calls out of Java. 366 reg_class l7_regP(R_L7); 367 368 // Other special pointer regs 369 reg_class g1_regP(R_G1); 370 reg_class g2_regP(R_G2); 371 reg_class g3_regP(R_G3); 372 reg_class g4_regP(R_G4); 373 reg_class g5_regP(R_G5); 374 reg_class i0_regP(R_I0); 375 reg_class o0_regP(R_O0); 376 reg_class o1_regP(R_O1); 377 reg_class o2_regP(R_O2); 378 reg_class o7_regP(R_O7); 379 #endif // _LP64 380 381 382 // ---------------------------- 383 // Long Register Classes 384 // ---------------------------- 385 // Longs in 1 register. Aligned adjacent hi/lo pairs. 386 // Note: O7 is never in this class; it is sometimes used as an encoding temp. 387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5 388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5 389 #ifdef _LP64 390 // 64-bit, longs in 1 register: use all 64-bit integer registers 391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's. 392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7 393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 394 #endif // _LP64 395 ); 396 397 reg_class g1_regL(R_G1H,R_G1); 398 reg_class g3_regL(R_G3H,R_G3); 399 reg_class o2_regL(R_O2H,R_O2); 400 reg_class o7_regL(R_O7H,R_O7); 401 402 // ---------------------------- 403 // Special Class for Condition Code Flags Register 404 reg_class int_flags(CCR); 405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3); 406 reg_class float_flag0(FCC0); 407 408 409 // ---------------------------- 410 // Float Point Register Classes 411 // ---------------------------- 412 // Skip F30/F31, they are reserved for mem-mem copies 413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 414 415 // Paired floating point registers--they show up in the same order as the floats, 416 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs. 417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29, 419 /* Use extra V9 double registers; this AD file does not support V8 */ 420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x 422 ); 423 424 // Paired floating point registers--they show up in the same order as the floats, 425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs. 426 // This class is usable for mis-aligned loads as happen in I2C adapters. 427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 429 %} 430 431 //----------DEFINITION BLOCK--------------------------------------------------- 432 // Define name --> value mappings to inform the ADLC of an integer valued name 433 // Current support includes integer values in the range [0, 0x7FFFFFFF] 434 // Format: 435 // int_def <name> ( <int_value>, <expression>); 436 // Generated Code in ad_<arch>.hpp 437 // #define <name> (<expression>) 438 // // value == <int_value> 439 // Generated code in ad_<arch>.cpp adlc_verification() 440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>"); 441 // 442 definitions %{ 443 // The default cost (of an ALU instruction). 444 int_def DEFAULT_COST ( 100, 100); 445 int_def HUGE_COST (1000000, 1000000); 446 447 // Memory refs are twice as expensive as run-of-the-mill. 448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2); 449 450 // Branches are even more expensive. 451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3); 452 int_def CALL_COST ( 300, DEFAULT_COST * 3); 453 %} 454 455 456 //----------SOURCE BLOCK------------------------------------------------------- 457 // This is a block of C++ code which provides values, functions, and 458 // definitions necessary in the rest of the architecture description 459 source_hpp %{ 460 // Must be visible to the DFA in dfa_sparc.cpp 461 extern bool can_branch_register( Node *bol, Node *cmp ); 462 463 extern bool use_block_zeroing(Node* count); 464 465 // Macros to extract hi & lo halves from a long pair. 466 // G0 is not part of any long pair, so assert on that. 467 // Prevents accidentally using G1 instead of G0. 468 #define LONG_HI_REG(x) (x) 469 #define LONG_LO_REG(x) (x) 470 471 %} 472 473 source %{ 474 #define __ _masm. 475 476 // tertiary op of a LoadP or StoreP encoding 477 #define REGP_OP true 478 479 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding); 480 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding); 481 static Register reg_to_register_object(int register_encoding); 482 483 // Used by the DFA in dfa_sparc.cpp. 484 // Check for being able to use a V9 branch-on-register. Requires a 485 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign- 486 // extended. Doesn't work following an integer ADD, for example, because of 487 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On 488 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and 489 // replace them with zero, which could become sign-extension in a different OS 490 // release. There's no obvious reason why an interrupt will ever fill these 491 // bits with non-zero junk (the registers are reloaded with standard LD 492 // instructions which either zero-fill or sign-fill). 493 bool can_branch_register( Node *bol, Node *cmp ) { 494 if( !BranchOnRegister ) return false; 495 #ifdef _LP64 496 if( cmp->Opcode() == Op_CmpP ) 497 return true; // No problems with pointer compares 498 #endif 499 if( cmp->Opcode() == Op_CmpL ) 500 return true; // No problems with long compares 501 502 if( !SparcV9RegsHiBitsZero ) return false; 503 if( bol->as_Bool()->_test._test != BoolTest::ne && 504 bol->as_Bool()->_test._test != BoolTest::eq ) 505 return false; 506 507 // Check for comparing against a 'safe' value. Any operation which 508 // clears out the high word is safe. Thus, loads and certain shifts 509 // are safe, as are non-negative constants. Any operation which 510 // preserves zero bits in the high word is safe as long as each of its 511 // inputs are safe. Thus, phis and bitwise booleans are safe if their 512 // inputs are safe. At present, the only important case to recognize 513 // seems to be loads. Constants should fold away, and shifts & 514 // logicals can use the 'cc' forms. 515 Node *x = cmp->in(1); 516 if( x->is_Load() ) return true; 517 if( x->is_Phi() ) { 518 for( uint i = 1; i < x->req(); i++ ) 519 if( !x->in(i)->is_Load() ) 520 return false; 521 return true; 522 } 523 return false; 524 } 525 526 bool use_block_zeroing(Node* count) { 527 // Use BIS for zeroing if count is not constant 528 // or it is >= BlockZeroingLowLimit. 529 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit); 530 } 531 532 // **************************************************************************** 533 534 // REQUIRED FUNCTIONALITY 535 536 // !!!!! Special hack to get all type of calls to specify the byte offset 537 // from the start of the call to the point where the return address 538 // will point. 539 // The "return address" is the address of the call instruction, plus 8. 540 541 int MachCallStaticJavaNode::ret_addr_offset() { 542 int offset = NativeCall::instruction_size; // call; delay slot 543 if (_method_handle_invoke) 544 offset += 4; // restore SP 545 return offset; 546 } 547 548 int MachCallDynamicJavaNode::ret_addr_offset() { 549 int vtable_index = this->_vtable_index; 550 if (vtable_index < 0) { 551 // must be invalid_vtable_index, not nonvirtual_vtable_index 552 assert(vtable_index == methodOopDesc::invalid_vtable_index, "correct sentinel value"); 553 return (NativeMovConstReg::instruction_size + 554 NativeCall::instruction_size); // sethi; setlo; call; delay slot 555 } else { 556 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 557 int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 558 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 559 int klass_load_size; 560 if (UseCompressedOops) { 561 assert(Universe::heap() != NULL, "java heap should be initialized"); 562 if (Universe::narrow_oop_base() == NULL) 563 klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass() 564 else 565 klass_load_size = 3*BytesPerInstWord; 566 } else { 567 klass_load_size = 1*BytesPerInstWord; 568 } 569 if (Assembler::is_simm13(v_off)) { 570 return klass_load_size + 571 (2*BytesPerInstWord + // ld_ptr, ld_ptr 572 NativeCall::instruction_size); // call; delay slot 573 } else { 574 return klass_load_size + 575 (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr 576 NativeCall::instruction_size); // call; delay slot 577 } 578 } 579 } 580 581 int MachCallRuntimeNode::ret_addr_offset() { 582 #ifdef _LP64 583 if (MacroAssembler::is_far_target(entry_point())) { 584 return NativeFarCall::instruction_size; 585 } else { 586 return NativeCall::instruction_size; 587 } 588 #else 589 return NativeCall::instruction_size; // call; delay slot 590 #endif 591 } 592 593 // Indicate if the safepoint node needs the polling page as an input. 594 // Since Sparc does not have absolute addressing, it does. 595 bool SafePointNode::needs_polling_address_input() { 596 return true; 597 } 598 599 // emit an interrupt that is caught by the debugger (for debugging compiler) 600 void emit_break(CodeBuffer &cbuf) { 601 MacroAssembler _masm(&cbuf); 602 __ breakpoint_trap(); 603 } 604 605 #ifndef PRODUCT 606 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const { 607 st->print("TA"); 608 } 609 #endif 610 611 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 612 emit_break(cbuf); 613 } 614 615 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const { 616 return MachNode::size(ra_); 617 } 618 619 // Traceable jump 620 void emit_jmpl(CodeBuffer &cbuf, int jump_target) { 621 MacroAssembler _masm(&cbuf); 622 Register rdest = reg_to_register_object(jump_target); 623 __ JMP(rdest, 0); 624 __ delayed()->nop(); 625 } 626 627 // Traceable jump and set exception pc 628 void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) { 629 MacroAssembler _masm(&cbuf); 630 Register rdest = reg_to_register_object(jump_target); 631 __ JMP(rdest, 0); 632 __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc ); 633 } 634 635 void emit_nop(CodeBuffer &cbuf) { 636 MacroAssembler _masm(&cbuf); 637 __ nop(); 638 } 639 640 void emit_illtrap(CodeBuffer &cbuf) { 641 MacroAssembler _masm(&cbuf); 642 __ illtrap(0); 643 } 644 645 646 intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) { 647 assert(n->rule() != loadUB_rule, ""); 648 649 intptr_t offset = 0; 650 const TypePtr *adr_type = TYPE_PTR_SENTINAL; // Check for base==RegI, disp==immP 651 const Node* addr = n->get_base_and_disp(offset, adr_type); 652 assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP"); 653 assert(addr != NULL && addr != (Node*)-1, "invalid addr"); 654 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 655 atype = atype->add_offset(offset); 656 assert(disp32 == offset, "wrong disp32"); 657 return atype->_offset; 658 } 659 660 661 intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) { 662 assert(n->rule() != loadUB_rule, ""); 663 664 intptr_t offset = 0; 665 Node* addr = n->in(2); 666 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 667 if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) { 668 Node* a = addr->in(2/*AddPNode::Address*/); 669 Node* o = addr->in(3/*AddPNode::Offset*/); 670 offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot; 671 atype = a->bottom_type()->is_ptr()->add_offset(offset); 672 assert(atype->isa_oop_ptr(), "still an oop"); 673 } 674 offset = atype->is_ptr()->_offset; 675 if (offset != Type::OffsetBot) offset += disp32; 676 return offset; 677 } 678 679 static inline jdouble replicate_immI(int con, int count, int width) { 680 // Load a constant replicated "count" times with width "width" 681 int bit_width = width * 8; 682 jlong elt_val = con; 683 elt_val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits 684 jlong val = elt_val; 685 for (int i = 0; i < count - 1; i++) { 686 val <<= bit_width; 687 val |= elt_val; 688 } 689 jdouble dval = *((jdouble*) &val); // coerce to double type 690 return dval; 691 } 692 693 // Standard Sparc opcode form2 field breakdown 694 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) { 695 f0 &= (1<<19)-1; // Mask displacement to 19 bits 696 int op = (f30 << 30) | 697 (f29 << 29) | 698 (f25 << 25) | 699 (f22 << 22) | 700 (f20 << 20) | 701 (f19 << 19) | 702 (f0 << 0); 703 cbuf.insts()->emit_int32(op); 704 } 705 706 // Standard Sparc opcode form2 field breakdown 707 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) { 708 f0 >>= 10; // Drop 10 bits 709 f0 &= (1<<22)-1; // Mask displacement to 22 bits 710 int op = (f30 << 30) | 711 (f25 << 25) | 712 (f22 << 22) | 713 (f0 << 0); 714 cbuf.insts()->emit_int32(op); 715 } 716 717 // Standard Sparc opcode form3 field breakdown 718 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) { 719 int op = (f30 << 30) | 720 (f25 << 25) | 721 (f19 << 19) | 722 (f14 << 14) | 723 (f5 << 5) | 724 (f0 << 0); 725 cbuf.insts()->emit_int32(op); 726 } 727 728 // Standard Sparc opcode form3 field breakdown 729 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) { 730 simm13 &= (1<<13)-1; // Mask to 13 bits 731 int op = (f30 << 30) | 732 (f25 << 25) | 733 (f19 << 19) | 734 (f14 << 14) | 735 (1 << 13) | // bit to indicate immediate-mode 736 (simm13<<0); 737 cbuf.insts()->emit_int32(op); 738 } 739 740 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) { 741 simm10 &= (1<<10)-1; // Mask to 10 bits 742 emit3_simm13(cbuf,f30,f25,f19,f14,simm10); 743 } 744 745 #ifdef ASSERT 746 // Helper function for VerifyOops in emit_form3_mem_reg 747 void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) { 748 warning("VerifyOops encountered unexpected instruction:"); 749 n->dump(2); 750 warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]); 751 } 752 #endif 753 754 755 void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary, 756 int src1_enc, int disp32, int src2_enc, int dst_enc) { 757 758 #ifdef ASSERT 759 // The following code implements the +VerifyOops feature. 760 // It verifies oop values which are loaded into or stored out of 761 // the current method activation. +VerifyOops complements techniques 762 // like ScavengeALot, because it eagerly inspects oops in transit, 763 // as they enter or leave the stack, as opposed to ScavengeALot, 764 // which inspects oops "at rest", in the stack or heap, at safepoints. 765 // For this reason, +VerifyOops can sometimes detect bugs very close 766 // to their point of creation. It can also serve as a cross-check 767 // on the validity of oop maps, when used toegether with ScavengeALot. 768 769 // It would be good to verify oops at other points, especially 770 // when an oop is used as a base pointer for a load or store. 771 // This is presently difficult, because it is hard to know when 772 // a base address is biased or not. (If we had such information, 773 // it would be easy and useful to make a two-argument version of 774 // verify_oop which unbiases the base, and performs verification.) 775 776 assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary"); 777 bool is_verified_oop_base = false; 778 bool is_verified_oop_load = false; 779 bool is_verified_oop_store = false; 780 int tmp_enc = -1; 781 if (VerifyOops && src1_enc != R_SP_enc) { 782 // classify the op, mainly for an assert check 783 int st_op = 0, ld_op = 0; 784 switch (primary) { 785 case Assembler::stb_op3: st_op = Op_StoreB; break; 786 case Assembler::sth_op3: st_op = Op_StoreC; break; 787 case Assembler::stx_op3: // may become StoreP or stay StoreI or StoreD0 788 case Assembler::stw_op3: st_op = Op_StoreI; break; 789 case Assembler::std_op3: st_op = Op_StoreL; break; 790 case Assembler::stf_op3: st_op = Op_StoreF; break; 791 case Assembler::stdf_op3: st_op = Op_StoreD; break; 792 793 case Assembler::ldsb_op3: ld_op = Op_LoadB; break; 794 case Assembler::lduh_op3: ld_op = Op_LoadUS; break; 795 case Assembler::ldsh_op3: ld_op = Op_LoadS; break; 796 case Assembler::ldx_op3: // may become LoadP or stay LoadI 797 case Assembler::ldsw_op3: // may become LoadP or stay LoadI 798 case Assembler::lduw_op3: ld_op = Op_LoadI; break; 799 case Assembler::ldd_op3: ld_op = Op_LoadL; break; 800 case Assembler::ldf_op3: ld_op = Op_LoadF; break; 801 case Assembler::lddf_op3: ld_op = Op_LoadD; break; 802 case Assembler::ldub_op3: ld_op = Op_LoadB; break; 803 case Assembler::prefetch_op3: ld_op = Op_LoadI; break; 804 805 default: ShouldNotReachHere(); 806 } 807 if (tertiary == REGP_OP) { 808 if (st_op == Op_StoreI) st_op = Op_StoreP; 809 else if (ld_op == Op_LoadI) ld_op = Op_LoadP; 810 else ShouldNotReachHere(); 811 if (st_op) { 812 // a store 813 // inputs are (0:control, 1:memory, 2:address, 3:value) 814 Node* n2 = n->in(3); 815 if (n2 != NULL) { 816 const Type* t = n2->bottom_type(); 817 is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false; 818 } 819 } else { 820 // a load 821 const Type* t = n->bottom_type(); 822 is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false; 823 } 824 } 825 826 if (ld_op) { 827 // a Load 828 // inputs are (0:control, 1:memory, 2:address) 829 if (!(n->ideal_Opcode()==ld_op) && // Following are special cases 830 !(n->ideal_Opcode()==Op_LoadLLocked && ld_op==Op_LoadI) && 831 !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) && 832 !(n->ideal_Opcode()==Op_LoadI && ld_op==Op_LoadF) && 833 !(n->ideal_Opcode()==Op_LoadF && ld_op==Op_LoadI) && 834 !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) && 835 !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) && 836 !(n->ideal_Opcode()==Op_LoadL && ld_op==Op_LoadI) && 837 !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) && 838 !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) && 839 !(n->ideal_Opcode()==Op_ConvI2F && ld_op==Op_LoadF) && 840 !(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) && 841 !(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) && 842 !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) && 843 !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) && 844 !(n->ideal_Opcode()==Op_Load2I && ld_op==Op_LoadD) && 845 !(n->ideal_Opcode()==Op_Load4C && ld_op==Op_LoadD) && 846 !(n->ideal_Opcode()==Op_Load4S && ld_op==Op_LoadD) && 847 !(n->ideal_Opcode()==Op_Load8B && ld_op==Op_LoadD) && 848 !(n->rule() == loadUB_rule)) { 849 verify_oops_warning(n, n->ideal_Opcode(), ld_op); 850 } 851 } else if (st_op) { 852 // a Store 853 // inputs are (0:control, 1:memory, 2:address, 3:value) 854 if (!(n->ideal_Opcode()==st_op) && // Following are special cases 855 !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) && 856 !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) && 857 !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) && 858 !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) && 859 !(n->ideal_Opcode()==Op_Store2I && st_op==Op_StoreD) && 860 !(n->ideal_Opcode()==Op_Store4C && st_op==Op_StoreD) && 861 !(n->ideal_Opcode()==Op_Store8B && st_op==Op_StoreD) && 862 !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) { 863 verify_oops_warning(n, n->ideal_Opcode(), st_op); 864 } 865 } 866 867 if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) { 868 Node* addr = n->in(2); 869 if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) { 870 const TypeOopPtr* atype = addr->bottom_type()->isa_instptr(); // %%% oopptr? 871 if (atype != NULL) { 872 intptr_t offset = get_offset_from_base(n, atype, disp32); 873 intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32); 874 if (offset != offset_2) { 875 get_offset_from_base(n, atype, disp32); 876 get_offset_from_base_2(n, atype, disp32); 877 } 878 assert(offset == offset_2, "different offsets"); 879 if (offset == disp32) { 880 // we now know that src1 is a true oop pointer 881 is_verified_oop_base = true; 882 if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) { 883 if( primary == Assembler::ldd_op3 ) { 884 is_verified_oop_base = false; // Cannot 'ldd' into O7 885 } else { 886 tmp_enc = dst_enc; 887 dst_enc = R_O7_enc; // Load into O7; preserve source oop 888 assert(src1_enc != dst_enc, ""); 889 } 890 } 891 } 892 if (st_op && (( offset == oopDesc::klass_offset_in_bytes()) 893 || offset == oopDesc::mark_offset_in_bytes())) { 894 // loading the mark should not be allowed either, but 895 // we don't check this since it conflicts with InlineObjectHash 896 // usage of LoadINode to get the mark. We could keep the 897 // check if we create a new LoadMarkNode 898 // but do not verify the object before its header is initialized 899 ShouldNotReachHere(); 900 } 901 } 902 } 903 } 904 } 905 #endif 906 907 uint instr; 908 instr = (Assembler::ldst_op << 30) 909 | (dst_enc << 25) 910 | (primary << 19) 911 | (src1_enc << 14); 912 913 uint index = src2_enc; 914 int disp = disp32; 915 916 if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) 917 disp += STACK_BIAS; 918 919 // We should have a compiler bailout here rather than a guarantee. 920 // Better yet would be some mechanism to handle variable-size matches correctly. 921 guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" ); 922 923 if( disp == 0 ) { 924 // use reg-reg form 925 // bit 13 is already zero 926 instr |= index; 927 } else { 928 // use reg-imm form 929 instr |= 0x00002000; // set bit 13 to one 930 instr |= disp & 0x1FFF; 931 } 932 933 cbuf.insts()->emit_int32(instr); 934 935 #ifdef ASSERT 936 { 937 MacroAssembler _masm(&cbuf); 938 if (is_verified_oop_base) { 939 __ verify_oop(reg_to_register_object(src1_enc)); 940 } 941 if (is_verified_oop_store) { 942 __ verify_oop(reg_to_register_object(dst_enc)); 943 } 944 if (tmp_enc != -1) { 945 __ mov(O7, reg_to_register_object(tmp_enc)); 946 } 947 if (is_verified_oop_load) { 948 __ verify_oop(reg_to_register_object(dst_enc)); 949 } 950 } 951 #endif 952 } 953 954 void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) { 955 // The method which records debug information at every safepoint 956 // expects the call to be the first instruction in the snippet as 957 // it creates a PcDesc structure which tracks the offset of a call 958 // from the start of the codeBlob. This offset is computed as 959 // code_end() - code_begin() of the code which has been emitted 960 // so far. 961 // In this particular case we have skirted around the problem by 962 // putting the "mov" instruction in the delay slot but the problem 963 // may bite us again at some other point and a cleaner/generic 964 // solution using relocations would be needed. 965 MacroAssembler _masm(&cbuf); 966 __ set_inst_mark(); 967 968 // We flush the current window just so that there is a valid stack copy 969 // the fact that the current window becomes active again instantly is 970 // not a problem there is nothing live in it. 971 972 #ifdef ASSERT 973 int startpos = __ offset(); 974 #endif /* ASSERT */ 975 976 __ call((address)entry_point, rtype); 977 978 if (preserve_g2) __ delayed()->mov(G2, L7); 979 else __ delayed()->nop(); 980 981 if (preserve_g2) __ mov(L7, G2); 982 983 #ifdef ASSERT 984 if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) { 985 #ifdef _LP64 986 // Trash argument dump slots. 987 __ set(0xb0b8ac0db0b8ac0d, G1); 988 __ mov(G1, G5); 989 __ stx(G1, SP, STACK_BIAS + 0x80); 990 __ stx(G1, SP, STACK_BIAS + 0x88); 991 __ stx(G1, SP, STACK_BIAS + 0x90); 992 __ stx(G1, SP, STACK_BIAS + 0x98); 993 __ stx(G1, SP, STACK_BIAS + 0xA0); 994 __ stx(G1, SP, STACK_BIAS + 0xA8); 995 #else // _LP64 996 // this is also a native call, so smash the first 7 stack locations, 997 // and the various registers 998 999 // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset], 1000 // while [SP+0x44..0x58] are the argument dump slots. 1001 __ set((intptr_t)0xbaadf00d, G1); 1002 __ mov(G1, G5); 1003 __ sllx(G1, 32, G1); 1004 __ or3(G1, G5, G1); 1005 __ mov(G1, G5); 1006 __ stx(G1, SP, 0x40); 1007 __ stx(G1, SP, 0x48); 1008 __ stx(G1, SP, 0x50); 1009 __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot 1010 #endif // _LP64 1011 } 1012 #endif /*ASSERT*/ 1013 } 1014 1015 //============================================================================= 1016 // REQUIRED FUNCTIONALITY for encoding 1017 void emit_lo(CodeBuffer &cbuf, int val) { } 1018 void emit_hi(CodeBuffer &cbuf, int val) { } 1019 1020 1021 //============================================================================= 1022 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask(); 1023 1024 int Compile::ConstantTable::calculate_table_base_offset() const { 1025 if (UseRDPCForConstantTableBase) { 1026 // The table base offset might be less but then it fits into 1027 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit). 1028 return Assembler::min_simm13(); 1029 } else { 1030 int offset = -(size() / 2); 1031 if (!Assembler::is_simm13(offset)) { 1032 offset = Assembler::min_simm13(); 1033 } 1034 return offset; 1035 } 1036 } 1037 1038 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 1039 Compile* C = ra_->C; 1040 Compile::ConstantTable& constant_table = C->constant_table(); 1041 MacroAssembler _masm(&cbuf); 1042 1043 Register r = as_Register(ra_->get_encode(this)); 1044 CodeSection* consts_section = __ code()->consts(); 1045 int consts_size = consts_section->align_at_start(consts_section->size()); 1046 assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size)); 1047 1048 if (UseRDPCForConstantTableBase) { 1049 // For the following RDPC logic to work correctly the consts 1050 // section must be allocated right before the insts section. This 1051 // assert checks for that. The layout and the SECT_* constants 1052 // are defined in src/share/vm/asm/codeBuffer.hpp. 1053 assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be"); 1054 int insts_offset = __ offset(); 1055 1056 // Layout: 1057 // 1058 // |----------- consts section ------------|----------- insts section -----------... 1059 // |------ constant table -----|- padding -|------------------x---- 1060 // \ current PC (RDPC instruction) 1061 // |<------------- consts_size ----------->|<- insts_offset ->| 1062 // \ table base 1063 // The table base offset is later added to the load displacement 1064 // so it has to be negative. 1065 int table_base_offset = -(consts_size + insts_offset); 1066 int disp; 1067 1068 // If the displacement from the current PC to the constant table 1069 // base fits into simm13 we set the constant table base to the 1070 // current PC. 1071 if (Assembler::is_simm13(table_base_offset)) { 1072 constant_table.set_table_base_offset(table_base_offset); 1073 disp = 0; 1074 } else { 1075 // Otherwise we set the constant table base offset to the 1076 // maximum negative displacement of load instructions to keep 1077 // the disp as small as possible: 1078 // 1079 // |<------------- consts_size ----------->|<- insts_offset ->| 1080 // |<--------- min_simm13 --------->|<-------- disp --------->| 1081 // \ table base 1082 table_base_offset = Assembler::min_simm13(); 1083 constant_table.set_table_base_offset(table_base_offset); 1084 disp = (consts_size + insts_offset) + table_base_offset; 1085 } 1086 1087 __ rdpc(r); 1088 1089 if (disp != 0) { 1090 assert(r != O7, "need temporary"); 1091 __ sub(r, __ ensure_simm13_or_reg(disp, O7), r); 1092 } 1093 } 1094 else { 1095 // Materialize the constant table base. 1096 address baseaddr = consts_section->start() + -(constant_table.table_base_offset()); 1097 RelocationHolder rspec = internal_word_Relocation::spec(baseaddr); 1098 AddressLiteral base(baseaddr, rspec); 1099 __ set(base, r); 1100 } 1101 } 1102 1103 uint MachConstantBaseNode::size(PhaseRegAlloc*) const { 1104 if (UseRDPCForConstantTableBase) { 1105 // This is really the worst case but generally it's only 1 instruction. 1106 return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord; 1107 } else { 1108 return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord; 1109 } 1110 } 1111 1112 #ifndef PRODUCT 1113 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 1114 char reg[128]; 1115 ra_->dump_register(this, reg); 1116 if (UseRDPCForConstantTableBase) { 1117 st->print("RDPC %s\t! constant table base", reg); 1118 } else { 1119 st->print("SET &constanttable,%s\t! constant table base", reg); 1120 } 1121 } 1122 #endif 1123 1124 1125 //============================================================================= 1126 1127 #ifndef PRODUCT 1128 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1129 Compile* C = ra_->C; 1130 1131 for (int i = 0; i < OptoPrologueNops; i++) { 1132 st->print_cr("NOP"); st->print("\t"); 1133 } 1134 1135 if( VerifyThread ) { 1136 st->print_cr("Verify_Thread"); st->print("\t"); 1137 } 1138 1139 size_t framesize = C->frame_slots() << LogBytesPerInt; 1140 1141 // Calls to C2R adapters often do not accept exceptional returns. 1142 // We require that their callers must bang for them. But be careful, because 1143 // some VM calls (such as call site linkage) can use several kilobytes of 1144 // stack. But the stack safety zone should account for that. 1145 // See bugs 4446381, 4468289, 4497237. 1146 if (C->need_stack_bang(framesize)) { 1147 st->print_cr("! stack bang"); st->print("\t"); 1148 } 1149 1150 if (Assembler::is_simm13(-framesize)) { 1151 st->print ("SAVE R_SP,-%d,R_SP",framesize); 1152 } else { 1153 st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t"); 1154 st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t"); 1155 st->print ("SAVE R_SP,R_G3,R_SP"); 1156 } 1157 1158 } 1159 #endif 1160 1161 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1162 Compile* C = ra_->C; 1163 MacroAssembler _masm(&cbuf); 1164 1165 for (int i = 0; i < OptoPrologueNops; i++) { 1166 __ nop(); 1167 } 1168 1169 __ verify_thread(); 1170 1171 size_t framesize = C->frame_slots() << LogBytesPerInt; 1172 assert(framesize >= 16*wordSize, "must have room for reg. save area"); 1173 assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment"); 1174 1175 // Calls to C2R adapters often do not accept exceptional returns. 1176 // We require that their callers must bang for them. But be careful, because 1177 // some VM calls (such as call site linkage) can use several kilobytes of 1178 // stack. But the stack safety zone should account for that. 1179 // See bugs 4446381, 4468289, 4497237. 1180 if (C->need_stack_bang(framesize)) { 1181 __ generate_stack_overflow_check(framesize); 1182 } 1183 1184 if (Assembler::is_simm13(-framesize)) { 1185 __ save(SP, -framesize, SP); 1186 } else { 1187 __ sethi(-framesize & ~0x3ff, G3); 1188 __ add(G3, -framesize & 0x3ff, G3); 1189 __ save(SP, G3, SP); 1190 } 1191 C->set_frame_complete( __ offset() ); 1192 1193 if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) { 1194 // NOTE: We set the table base offset here because users might be 1195 // emitted before MachConstantBaseNode. 1196 Compile::ConstantTable& constant_table = C->constant_table(); 1197 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset()); 1198 } 1199 } 1200 1201 uint MachPrologNode::size(PhaseRegAlloc *ra_) const { 1202 return MachNode::size(ra_); 1203 } 1204 1205 int MachPrologNode::reloc() const { 1206 return 10; // a large enough number 1207 } 1208 1209 //============================================================================= 1210 #ifndef PRODUCT 1211 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1212 Compile* C = ra_->C; 1213 1214 if( do_polling() && ra_->C->is_method_compilation() ) { 1215 st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t"); 1216 #ifdef _LP64 1217 st->print("LDX [L0],G0\t!Poll for Safepointing\n\t"); 1218 #else 1219 st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t"); 1220 #endif 1221 } 1222 1223 if( do_polling() ) 1224 st->print("RET\n\t"); 1225 1226 st->print("RESTORE"); 1227 } 1228 #endif 1229 1230 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1231 MacroAssembler _masm(&cbuf); 1232 Compile* C = ra_->C; 1233 1234 __ verify_thread(); 1235 1236 // If this does safepoint polling, then do it here 1237 if( do_polling() && ra_->C->is_method_compilation() ) { 1238 AddressLiteral polling_page(os::get_polling_page()); 1239 __ sethi(polling_page, L0); 1240 __ relocate(relocInfo::poll_return_type); 1241 __ ld_ptr( L0, 0, G0 ); 1242 } 1243 1244 // If this is a return, then stuff the restore in the delay slot 1245 if( do_polling() ) { 1246 __ ret(); 1247 __ delayed()->restore(); 1248 } else { 1249 __ restore(); 1250 } 1251 } 1252 1253 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const { 1254 return MachNode::size(ra_); 1255 } 1256 1257 int MachEpilogNode::reloc() const { 1258 return 16; // a large enough number 1259 } 1260 1261 const Pipeline * MachEpilogNode::pipeline() const { 1262 return MachNode::pipeline_class(); 1263 } 1264 1265 int MachEpilogNode::safepoint_offset() const { 1266 assert( do_polling(), "no return for this epilog node"); 1267 return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord; 1268 } 1269 1270 //============================================================================= 1271 1272 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack 1273 enum RC { rc_bad, rc_int, rc_float, rc_stack }; 1274 static enum RC rc_class( OptoReg::Name reg ) { 1275 if( !OptoReg::is_valid(reg) ) return rc_bad; 1276 if (OptoReg::is_stack(reg)) return rc_stack; 1277 VMReg r = OptoReg::as_VMReg(reg); 1278 if (r->is_Register()) return rc_int; 1279 assert(r->is_FloatRegister(), "must be"); 1280 return rc_float; 1281 } 1282 1283 static int impl_helper( const MachNode *mach, CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) { 1284 if( cbuf ) { 1285 // Better yet would be some mechanism to handle variable-size matches correctly 1286 if (!Assembler::is_simm13(offset + STACK_BIAS)) { 1287 ra_->C->record_method_not_compilable("unable to handle large constant offsets"); 1288 } else { 1289 emit_form3_mem_reg(*cbuf, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]); 1290 } 1291 } 1292 #ifndef PRODUCT 1293 else if( !do_size ) { 1294 if( size != 0 ) st->print("\n\t"); 1295 if( is_load ) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg)); 1296 else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset); 1297 } 1298 #endif 1299 return size+4; 1300 } 1301 1302 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) { 1303 if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] ); 1304 #ifndef PRODUCT 1305 else if( !do_size ) { 1306 if( size != 0 ) st->print("\n\t"); 1307 st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst)); 1308 } 1309 #endif 1310 return size+4; 1311 } 1312 1313 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, 1314 PhaseRegAlloc *ra_, 1315 bool do_size, 1316 outputStream* st ) const { 1317 // Get registers to move 1318 OptoReg::Name src_second = ra_->get_reg_second(in(1)); 1319 OptoReg::Name src_first = ra_->get_reg_first(in(1)); 1320 OptoReg::Name dst_second = ra_->get_reg_second(this ); 1321 OptoReg::Name dst_first = ra_->get_reg_first(this ); 1322 1323 enum RC src_second_rc = rc_class(src_second); 1324 enum RC src_first_rc = rc_class(src_first); 1325 enum RC dst_second_rc = rc_class(dst_second); 1326 enum RC dst_first_rc = rc_class(dst_first); 1327 1328 assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" ); 1329 1330 // Generate spill code! 1331 int size = 0; 1332 1333 if( src_first == dst_first && src_second == dst_second ) 1334 return size; // Self copy, no move 1335 1336 // -------------------------------------- 1337 // Check for mem-mem move. Load into unused float registers and fall into 1338 // the float-store case. 1339 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) { 1340 int offset = ra_->reg2offset(src_first); 1341 // Further check for aligned-adjacent pair, so we can use a double load 1342 if( (src_first&1)==0 && src_first+1 == src_second ) { 1343 src_second = OptoReg::Name(R_F31_num); 1344 src_second_rc = rc_float; 1345 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st); 1346 } else { 1347 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st); 1348 } 1349 src_first = OptoReg::Name(R_F30_num); 1350 src_first_rc = rc_float; 1351 } 1352 1353 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { 1354 int offset = ra_->reg2offset(src_second); 1355 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st); 1356 src_second = OptoReg::Name(R_F31_num); 1357 src_second_rc = rc_float; 1358 } 1359 1360 // -------------------------------------- 1361 // Check for float->int copy; requires a trip through memory 1362 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) { 1363 int offset = frame::register_save_words*wordSize; 1364 if (cbuf) { 1365 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 ); 1366 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1367 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1368 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 ); 1369 } 1370 #ifndef PRODUCT 1371 else if (!do_size) { 1372 if (size != 0) st->print("\n\t"); 1373 st->print( "SUB R_SP,16,R_SP\n"); 1374 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1375 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1376 st->print("\tADD R_SP,16,R_SP\n"); 1377 } 1378 #endif 1379 size += 16; 1380 } 1381 1382 // Check for float->int copy on T4 1383 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) { 1384 // Further check for aligned-adjacent pair, so we can use a double move 1385 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1386 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st); 1387 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st); 1388 } 1389 // Check for int->float copy on T4 1390 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) { 1391 // Further check for aligned-adjacent pair, so we can use a double move 1392 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1393 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st); 1394 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st); 1395 } 1396 1397 // -------------------------------------- 1398 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations. 1399 // In such cases, I have to do the big-endian swap. For aligned targets, the 1400 // hardware does the flop for me. Doubles are always aligned, so no problem 1401 // there. Misaligned sources only come from native-long-returns (handled 1402 // special below). 1403 #ifndef _LP64 1404 if( src_first_rc == rc_int && // source is already big-endian 1405 src_second_rc != rc_bad && // 64-bit move 1406 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst 1407 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" ); 1408 // Do the big-endian flop. 1409 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ; 1410 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc; 1411 } 1412 #endif 1413 1414 // -------------------------------------- 1415 // Check for integer reg-reg copy 1416 if( src_first_rc == rc_int && dst_first_rc == rc_int ) { 1417 #ifndef _LP64 1418 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case 1419 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1420 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1421 // operand contains the least significant word of the 64-bit value and vice versa. 1422 OptoReg::Name tmp = OptoReg::Name(R_O7_num); 1423 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" ); 1424 // Shift O0 left in-place, zero-extend O1, then OR them into the dst 1425 if( cbuf ) { 1426 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 ); 1427 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 ); 1428 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] ); 1429 #ifndef PRODUCT 1430 } else if( !do_size ) { 1431 if( size != 0 ) st->print("\n\t"); 1432 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp)); 1433 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second)); 1434 st->print("OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first)); 1435 #endif 1436 } 1437 return size+12; 1438 } 1439 else if( dst_first == R_I0_num && dst_second == R_I1_num ) { 1440 // returning a long value in I0/I1 1441 // a SpillCopy must be able to target a return instruction's reg_class 1442 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1443 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1444 // operand contains the least significant word of the 64-bit value and vice versa. 1445 OptoReg::Name tdest = dst_first; 1446 1447 if (src_first == dst_first) { 1448 tdest = OptoReg::Name(R_O7_num); 1449 size += 4; 1450 } 1451 1452 if( cbuf ) { 1453 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg"); 1454 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1 1455 // ShrL_reg_imm6 1456 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 ); 1457 // ShrR_reg_imm6 src, 0, dst 1458 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 ); 1459 if (tdest != dst_first) { 1460 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] ); 1461 } 1462 } 1463 #ifndef PRODUCT 1464 else if( !do_size ) { 1465 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!! 1466 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest)); 1467 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second)); 1468 if (tdest != dst_first) { 1469 st->print("MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first)); 1470 } 1471 } 1472 #endif // PRODUCT 1473 return size+8; 1474 } 1475 #endif // !_LP64 1476 // Else normal reg-reg copy 1477 assert( src_second != dst_first, "smashed second before evacuating it" ); 1478 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st); 1479 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" ); 1480 // This moves an aligned adjacent pair. 1481 // See if we are done. 1482 if( src_first+1 == src_second && dst_first+1 == dst_second ) 1483 return size; 1484 } 1485 1486 // Check for integer store 1487 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) { 1488 int offset = ra_->reg2offset(dst_first); 1489 // Further check for aligned-adjacent pair, so we can use a double store 1490 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1491 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st); 1492 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st); 1493 } 1494 1495 // Check for integer load 1496 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) { 1497 int offset = ra_->reg2offset(src_first); 1498 // Further check for aligned-adjacent pair, so we can use a double load 1499 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1500 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st); 1501 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1502 } 1503 1504 // Check for float reg-reg copy 1505 if( src_first_rc == rc_float && dst_first_rc == rc_float ) { 1506 // Further check for aligned-adjacent pair, so we can use a double move 1507 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1508 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st); 1509 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st); 1510 } 1511 1512 // Check for float store 1513 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) { 1514 int offset = ra_->reg2offset(dst_first); 1515 // Further check for aligned-adjacent pair, so we can use a double store 1516 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1517 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st); 1518 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1519 } 1520 1521 // Check for float load 1522 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) { 1523 int offset = ra_->reg2offset(src_first); 1524 // Further check for aligned-adjacent pair, so we can use a double load 1525 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1526 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st); 1527 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st); 1528 } 1529 1530 // -------------------------------------------------------------------- 1531 // Check for hi bits still needing moving. Only happens for misaligned 1532 // arguments to native calls. 1533 if( src_second == dst_second ) 1534 return size; // Self copy; no move 1535 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" ); 1536 1537 #ifndef _LP64 1538 // In the LP64 build, all registers can be moved as aligned/adjacent 1539 // pairs, so there's never any need to move the high bits separately. 1540 // The 32-bit builds have to deal with the 32-bit ABI which can force 1541 // all sorts of silly alignment problems. 1542 1543 // Check for integer reg-reg copy. Hi bits are stuck up in the top 1544 // 32-bits of a 64-bit register, but are needed in low bits of another 1545 // register (else it's a hi-bits-to-hi-bits copy which should have 1546 // happened already as part of a 64-bit move) 1547 if( src_second_rc == rc_int && dst_second_rc == rc_int ) { 1548 assert( (src_second&1)==1, "its the evil O0/O1 native return case" ); 1549 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" ); 1550 // Shift src_second down to dst_second's low bits. 1551 if( cbuf ) { 1552 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1553 #ifndef PRODUCT 1554 } else if( !do_size ) { 1555 if( size != 0 ) st->print("\n\t"); 1556 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second)); 1557 #endif 1558 } 1559 return size+4; 1560 } 1561 1562 // Check for high word integer store. Must down-shift the hi bits 1563 // into a temp register, then fall into the case of storing int bits. 1564 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) { 1565 // Shift src_second down to dst_second's low bits. 1566 if( cbuf ) { 1567 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1568 #ifndef PRODUCT 1569 } else if( !do_size ) { 1570 if( size != 0 ) st->print("\n\t"); 1571 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num)); 1572 #endif 1573 } 1574 size+=4; 1575 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num! 1576 } 1577 1578 // Check for high word integer load 1579 if( dst_second_rc == rc_int && src_second_rc == rc_stack ) 1580 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st); 1581 1582 // Check for high word integer store 1583 if( src_second_rc == rc_int && dst_second_rc == rc_stack ) 1584 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st); 1585 1586 // Check for high word float store 1587 if( src_second_rc == rc_float && dst_second_rc == rc_stack ) 1588 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st); 1589 1590 #endif // !_LP64 1591 1592 Unimplemented(); 1593 } 1594 1595 #ifndef PRODUCT 1596 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1597 implementation( NULL, ra_, false, st ); 1598 } 1599 #endif 1600 1601 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1602 implementation( &cbuf, ra_, false, NULL ); 1603 } 1604 1605 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { 1606 return implementation( NULL, ra_, true, NULL ); 1607 } 1608 1609 //============================================================================= 1610 #ifndef PRODUCT 1611 void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const { 1612 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count); 1613 } 1614 #endif 1615 1616 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const { 1617 MacroAssembler _masm(&cbuf); 1618 for(int i = 0; i < _count; i += 1) { 1619 __ nop(); 1620 } 1621 } 1622 1623 uint MachNopNode::size(PhaseRegAlloc *ra_) const { 1624 return 4 * _count; 1625 } 1626 1627 1628 //============================================================================= 1629 #ifndef PRODUCT 1630 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1631 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); 1632 int reg = ra_->get_reg_first(this); 1633 st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]); 1634 } 1635 #endif 1636 1637 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1638 MacroAssembler _masm(&cbuf); 1639 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS; 1640 int reg = ra_->get_encode(this); 1641 1642 if (Assembler::is_simm13(offset)) { 1643 __ add(SP, offset, reg_to_register_object(reg)); 1644 } else { 1645 __ set(offset, O7); 1646 __ add(SP, O7, reg_to_register_object(reg)); 1647 } 1648 } 1649 1650 uint BoxLockNode::size(PhaseRegAlloc *ra_) const { 1651 // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_) 1652 assert(ra_ == ra_->C->regalloc(), "sanity"); 1653 return ra_->C->scratch_emit_size(this); 1654 } 1655 1656 //============================================================================= 1657 1658 // emit call stub, compiled java to interpretor 1659 void emit_java_to_interp(CodeBuffer &cbuf ) { 1660 1661 // Stub is fixed up when the corresponding call is converted from calling 1662 // compiled code to calling interpreted code. 1663 // set (empty), G5 1664 // jmp -1 1665 1666 address mark = cbuf.insts_mark(); // get mark within main instrs section 1667 1668 MacroAssembler _masm(&cbuf); 1669 1670 address base = 1671 __ start_a_stub(Compile::MAX_stubs_size); 1672 if (base == NULL) return; // CodeBuffer::expand failed 1673 1674 // static stub relocation stores the instruction address of the call 1675 __ relocate(static_stub_Relocation::spec(mark)); 1676 1677 __ set_oop(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode())); 1678 1679 __ set_inst_mark(); 1680 AddressLiteral addrlit(-1); 1681 __ JUMP(addrlit, G3, 0); 1682 1683 __ delayed()->nop(); 1684 1685 // Update current stubs pointer and restore code_end. 1686 __ end_a_stub(); 1687 } 1688 1689 // size of call stub, compiled java to interpretor 1690 uint size_java_to_interp() { 1691 // This doesn't need to be accurate but it must be larger or equal to 1692 // the real size of the stub. 1693 return (NativeMovConstReg::instruction_size + // sethi/setlo; 1694 NativeJump::instruction_size + // sethi; jmp; nop 1695 (TraceJumps ? 20 * BytesPerInstWord : 0) ); 1696 } 1697 // relocation entries for call stub, compiled java to interpretor 1698 uint reloc_java_to_interp() { 1699 return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call 1700 } 1701 1702 1703 //============================================================================= 1704 #ifndef PRODUCT 1705 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1706 st->print_cr("\nUEP:"); 1707 #ifdef _LP64 1708 if (UseCompressedOops) { 1709 assert(Universe::heap() != NULL, "java heap should be initialized"); 1710 st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); 1711 st->print_cr("\tSLL R_G5,3,R_G5"); 1712 if (Universe::narrow_oop_base() != NULL) 1713 st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); 1714 } else { 1715 st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check"); 1716 } 1717 st->print_cr("\tCMP R_G5,R_G3" ); 1718 st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2"); 1719 #else // _LP64 1720 st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check"); 1721 st->print_cr("\tCMP R_G5,R_G3" ); 1722 st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2"); 1723 #endif // _LP64 1724 } 1725 #endif 1726 1727 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1728 MacroAssembler _masm(&cbuf); 1729 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 1730 Register temp_reg = G3; 1731 assert( G5_ic_reg != temp_reg, "conflicting registers" ); 1732 1733 // Load klass from receiver 1734 __ load_klass(O0, temp_reg); 1735 // Compare against expected klass 1736 __ cmp(temp_reg, G5_ic_reg); 1737 // Branch to miss code, checks xcc or icc depending 1738 __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2); 1739 } 1740 1741 uint MachUEPNode::size(PhaseRegAlloc *ra_) const { 1742 return MachNode::size(ra_); 1743 } 1744 1745 1746 //============================================================================= 1747 1748 uint size_exception_handler() { 1749 if (TraceJumps) { 1750 return (400); // just a guess 1751 } 1752 return ( NativeJump::instruction_size ); // sethi;jmp;nop 1753 } 1754 1755 uint size_deopt_handler() { 1756 if (TraceJumps) { 1757 return (400); // just a guess 1758 } 1759 return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore 1760 } 1761 1762 // Emit exception handler code. 1763 int emit_exception_handler(CodeBuffer& cbuf) { 1764 Register temp_reg = G3; 1765 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point()); 1766 MacroAssembler _masm(&cbuf); 1767 1768 address base = 1769 __ start_a_stub(size_exception_handler()); 1770 if (base == NULL) return 0; // CodeBuffer::expand failed 1771 1772 int offset = __ offset(); 1773 1774 __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp 1775 __ delayed()->nop(); 1776 1777 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 1778 1779 __ end_a_stub(); 1780 1781 return offset; 1782 } 1783 1784 int emit_deopt_handler(CodeBuffer& cbuf) { 1785 // Can't use any of the current frame's registers as we may have deopted 1786 // at a poll and everything (including G3) can be live. 1787 Register temp_reg = L0; 1788 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); 1789 MacroAssembler _masm(&cbuf); 1790 1791 address base = 1792 __ start_a_stub(size_deopt_handler()); 1793 if (base == NULL) return 0; // CodeBuffer::expand failed 1794 1795 int offset = __ offset(); 1796 __ save_frame(0); 1797 __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp 1798 __ delayed()->restore(); 1799 1800 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); 1801 1802 __ end_a_stub(); 1803 return offset; 1804 1805 } 1806 1807 // Given a register encoding, produce a Integer Register object 1808 static Register reg_to_register_object(int register_encoding) { 1809 assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding"); 1810 return as_Register(register_encoding); 1811 } 1812 1813 // Given a register encoding, produce a single-precision Float Register object 1814 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) { 1815 assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding"); 1816 return as_SingleFloatRegister(register_encoding); 1817 } 1818 1819 // Given a register encoding, produce a double-precision Float Register object 1820 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) { 1821 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding"); 1822 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding"); 1823 return as_DoubleFloatRegister(register_encoding); 1824 } 1825 1826 const bool Matcher::match_rule_supported(int opcode) { 1827 if (!has_match_rule(opcode)) 1828 return false; 1829 1830 switch (opcode) { 1831 case Op_CountLeadingZerosI: 1832 case Op_CountLeadingZerosL: 1833 case Op_CountTrailingZerosI: 1834 case Op_CountTrailingZerosL: 1835 if (!UsePopCountInstruction) 1836 return false; 1837 break; 1838 } 1839 1840 return true; // Per default match rules are supported. 1841 } 1842 1843 int Matcher::regnum_to_fpu_offset(int regnum) { 1844 return regnum - 32; // The FP registers are in the second chunk 1845 } 1846 1847 #ifdef ASSERT 1848 address last_rethrow = NULL; // debugging aid for Rethrow encoding 1849 #endif 1850 1851 // Vector width in bytes 1852 const uint Matcher::vector_width_in_bytes(void) { 1853 return 8; 1854 } 1855 1856 // Vector ideal reg 1857 const uint Matcher::vector_ideal_reg(void) { 1858 return Op_RegD; 1859 } 1860 1861 // USII supports fxtof through the whole range of number, USIII doesn't 1862 const bool Matcher::convL2FSupported(void) { 1863 return VM_Version::has_fast_fxtof(); 1864 } 1865 1866 // Is this branch offset short enough that a short branch can be used? 1867 // 1868 // NOTE: If the platform does not provide any short branch variants, then 1869 // this method should return false for offset 0. 1870 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { 1871 // The passed offset is relative to address of the branch. 1872 // Don't need to adjust the offset. 1873 return UseCBCond && Assembler::is_simm12(offset); 1874 } 1875 1876 const bool Matcher::isSimpleConstant64(jlong value) { 1877 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?. 1878 // Depends on optimizations in MacroAssembler::setx. 1879 int hi = (int)(value >> 32); 1880 int lo = (int)(value & ~0); 1881 return (hi == 0) || (hi == -1) || (lo == 0); 1882 } 1883 1884 // No scaling for the parameter the ClearArray node. 1885 const bool Matcher::init_array_count_is_in_bytes = true; 1886 1887 // Threshold size for cleararray. 1888 const int Matcher::init_array_short_size = 8 * BytesPerLong; 1889 1890 // No additional cost for CMOVL. 1891 const int Matcher::long_cmove_cost() { return 0; } 1892 1893 // CMOVF/CMOVD are expensive on T4 and on SPARC64. 1894 const int Matcher::float_cmove_cost() { 1895 return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0; 1896 } 1897 1898 // Should the Matcher clone shifts on addressing modes, expecting them to 1899 // be subsumed into complex addressing expressions or compute them into 1900 // registers? True for Intel but false for most RISCs 1901 const bool Matcher::clone_shift_expressions = false; 1902 1903 // Do we need to mask the count passed to shift instructions or does 1904 // the cpu only look at the lower 5/6 bits anyway? 1905 const bool Matcher::need_masked_shift_count = false; 1906 1907 bool Matcher::narrow_oop_use_complex_address() { 1908 NOT_LP64(ShouldNotCallThis()); 1909 assert(UseCompressedOops, "only for compressed oops code"); 1910 return false; 1911 } 1912 1913 // Is it better to copy float constants, or load them directly from memory? 1914 // Intel can load a float constant from a direct address, requiring no 1915 // extra registers. Most RISCs will have to materialize an address into a 1916 // register first, so they would do better to copy the constant from stack. 1917 const bool Matcher::rematerialize_float_constants = false; 1918 1919 // If CPU can load and store mis-aligned doubles directly then no fixup is 1920 // needed. Else we split the double into 2 integer pieces and move it 1921 // piece-by-piece. Only happens when passing doubles into C code as the 1922 // Java calling convention forces doubles to be aligned. 1923 #ifdef _LP64 1924 const bool Matcher::misaligned_doubles_ok = true; 1925 #else 1926 const bool Matcher::misaligned_doubles_ok = false; 1927 #endif 1928 1929 // No-op on SPARC. 1930 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) { 1931 } 1932 1933 // Advertise here if the CPU requires explicit rounding operations 1934 // to implement the UseStrictFP mode. 1935 const bool Matcher::strict_fp_requires_explicit_rounding = false; 1936 1937 // Are floats conerted to double when stored to stack during deoptimization? 1938 // Sparc does not handle callee-save floats. 1939 bool Matcher::float_in_double() { return false; } 1940 1941 // Do ints take an entire long register or just half? 1942 // Note that we if-def off of _LP64. 1943 // The relevant question is how the int is callee-saved. In _LP64 1944 // the whole long is written but de-opt'ing will have to extract 1945 // the relevant 32 bits, in not-_LP64 only the low 32 bits is written. 1946 #ifdef _LP64 1947 const bool Matcher::int_in_long = true; 1948 #else 1949 const bool Matcher::int_in_long = false; 1950 #endif 1951 1952 // Return whether or not this register is ever used as an argument. This 1953 // function is used on startup to build the trampoline stubs in generateOptoStub. 1954 // Registers not mentioned will be killed by the VM call in the trampoline, and 1955 // arguments in those registers not be available to the callee. 1956 bool Matcher::can_be_java_arg( int reg ) { 1957 // Standard sparc 6 args in registers 1958 if( reg == R_I0_num || 1959 reg == R_I1_num || 1960 reg == R_I2_num || 1961 reg == R_I3_num || 1962 reg == R_I4_num || 1963 reg == R_I5_num ) return true; 1964 #ifdef _LP64 1965 // 64-bit builds can pass 64-bit pointers and longs in 1966 // the high I registers 1967 if( reg == R_I0H_num || 1968 reg == R_I1H_num || 1969 reg == R_I2H_num || 1970 reg == R_I3H_num || 1971 reg == R_I4H_num || 1972 reg == R_I5H_num ) return true; 1973 1974 if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) { 1975 return true; 1976 } 1977 1978 #else 1979 // 32-bit builds with longs-in-one-entry pass longs in G1 & G4. 1980 // Longs cannot be passed in O regs, because O regs become I regs 1981 // after a 'save' and I regs get their high bits chopped off on 1982 // interrupt. 1983 if( reg == R_G1H_num || reg == R_G1_num ) return true; 1984 if( reg == R_G4H_num || reg == R_G4_num ) return true; 1985 #endif 1986 // A few float args in registers 1987 if( reg >= R_F0_num && reg <= R_F7_num ) return true; 1988 1989 return false; 1990 } 1991 1992 bool Matcher::is_spillable_arg( int reg ) { 1993 return can_be_java_arg(reg); 1994 } 1995 1996 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { 1997 // Use hardware SDIVX instruction when it is 1998 // faster than a code which use multiply. 1999 return VM_Version::has_fast_idiv(); 2000 } 2001 2002 // Register for DIVI projection of divmodI 2003 RegMask Matcher::divI_proj_mask() { 2004 ShouldNotReachHere(); 2005 return RegMask(); 2006 } 2007 2008 // Register for MODI projection of divmodI 2009 RegMask Matcher::modI_proj_mask() { 2010 ShouldNotReachHere(); 2011 return RegMask(); 2012 } 2013 2014 // Register for DIVL projection of divmodL 2015 RegMask Matcher::divL_proj_mask() { 2016 ShouldNotReachHere(); 2017 return RegMask(); 2018 } 2019 2020 // Register for MODL projection of divmodL 2021 RegMask Matcher::modL_proj_mask() { 2022 ShouldNotReachHere(); 2023 return RegMask(); 2024 } 2025 2026 const RegMask Matcher::method_handle_invoke_SP_save_mask() { 2027 return L7_REGP_mask(); 2028 } 2029 2030 %} 2031 2032 2033 // The intptr_t operand types, defined by textual substitution. 2034 // (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.) 2035 #ifdef _LP64 2036 #define immX immL 2037 #define immX13 immL13 2038 #define immX13m7 immL13m7 2039 #define iRegX iRegL 2040 #define g1RegX g1RegL 2041 #else 2042 #define immX immI 2043 #define immX13 immI13 2044 #define immX13m7 immI13m7 2045 #define iRegX iRegI 2046 #define g1RegX g1RegI 2047 #endif 2048 2049 //----------ENCODING BLOCK----------------------------------------------------- 2050 // This block specifies the encoding classes used by the compiler to output 2051 // byte streams. Encoding classes are parameterized macros used by 2052 // Machine Instruction Nodes in order to generate the bit encoding of the 2053 // instruction. Operands specify their base encoding interface with the 2054 // interface keyword. There are currently supported four interfaces, 2055 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an 2056 // operand to generate a function which returns its register number when 2057 // queried. CONST_INTER causes an operand to generate a function which 2058 // returns the value of the constant when queried. MEMORY_INTER causes an 2059 // operand to generate four functions which return the Base Register, the 2060 // Index Register, the Scale Value, and the Offset Value of the operand when 2061 // queried. COND_INTER causes an operand to generate six functions which 2062 // return the encoding code (ie - encoding bits for the instruction) 2063 // associated with each basic boolean condition for a conditional instruction. 2064 // 2065 // Instructions specify two basic values for encoding. Again, a function 2066 // is available to check if the constant displacement is an oop. They use the 2067 // ins_encode keyword to specify their encoding classes (which must be 2068 // a sequence of enc_class names, and their parameters, specified in 2069 // the encoding block), and they use the 2070 // opcode keyword to specify, in order, their primary, secondary, and 2071 // tertiary opcode. Only the opcode sections which a particular instruction 2072 // needs for encoding need to be specified. 2073 encode %{ 2074 enc_class enc_untested %{ 2075 #ifdef ASSERT 2076 MacroAssembler _masm(&cbuf); 2077 __ untested("encoding"); 2078 #endif 2079 %} 2080 2081 enc_class form3_mem_reg( memory mem, iRegI dst ) %{ 2082 emit_form3_mem_reg(cbuf, this, $primary, $tertiary, 2083 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2084 %} 2085 2086 enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{ 2087 emit_form3_mem_reg(cbuf, this, $primary, -1, 2088 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2089 %} 2090 2091 enc_class form3_mem_prefetch_read( memory mem ) %{ 2092 emit_form3_mem_reg(cbuf, this, $primary, -1, 2093 $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/); 2094 %} 2095 2096 enc_class form3_mem_prefetch_write( memory mem ) %{ 2097 emit_form3_mem_reg(cbuf, this, $primary, -1, 2098 $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/); 2099 %} 2100 2101 enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{ 2102 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2103 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2104 guarantee($mem$$index == R_G0_enc, "double index?"); 2105 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc ); 2106 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg ); 2107 emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 ); 2108 emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc ); 2109 %} 2110 2111 enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{ 2112 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2113 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2114 guarantee($mem$$index == R_G0_enc, "double index?"); 2115 // Load long with 2 instructions 2116 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 ); 2117 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 ); 2118 %} 2119 2120 //%%% form3_mem_plus_4_reg is a hack--get rid of it 2121 enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{ 2122 guarantee($mem$$disp, "cannot offset a reg-reg operand by 4"); 2123 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg); 2124 %} 2125 2126 enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{ 2127 // Encode a reg-reg copy. If it is useless, then empty encoding. 2128 if( $rs2$$reg != $rd$$reg ) 2129 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg ); 2130 %} 2131 2132 // Target lo half of long 2133 enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{ 2134 // Encode a reg-reg copy. If it is useless, then empty encoding. 2135 if( $rs2$$reg != LONG_LO_REG($rd$$reg) ) 2136 emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg ); 2137 %} 2138 2139 // Source lo half of long 2140 enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{ 2141 // Encode a reg-reg copy. If it is useless, then empty encoding. 2142 if( LONG_LO_REG($rs2$$reg) != $rd$$reg ) 2143 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) ); 2144 %} 2145 2146 // Target hi half of long 2147 enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{ 2148 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 ); 2149 %} 2150 2151 // Source lo half of long, and leave it sign extended. 2152 enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{ 2153 // Sign extend low half 2154 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 ); 2155 %} 2156 2157 // Source hi half of long, and leave it sign extended. 2158 enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{ 2159 // Shift high half to low half 2160 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 ); 2161 %} 2162 2163 // Source hi half of long 2164 enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{ 2165 // Encode a reg-reg copy. If it is useless, then empty encoding. 2166 if( LONG_HI_REG($rs2$$reg) != $rd$$reg ) 2167 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) ); 2168 %} 2169 2170 enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{ 2171 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg ); 2172 %} 2173 2174 enc_class enc_to_bool( iRegI src, iRegI dst ) %{ 2175 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg ); 2176 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 ); 2177 %} 2178 2179 enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{ 2180 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg ); 2181 // clear if nothing else is happening 2182 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 ); 2183 // blt,a,pn done 2184 emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 ); 2185 // mov dst,-1 in delay slot 2186 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 ); 2187 %} 2188 2189 enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{ 2190 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F ); 2191 %} 2192 2193 enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{ 2194 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 ); 2195 %} 2196 2197 enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{ 2198 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg ); 2199 %} 2200 2201 enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{ 2202 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant ); 2203 %} 2204 2205 enc_class move_return_pc_to_o1() %{ 2206 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset ); 2207 %} 2208 2209 #ifdef _LP64 2210 /* %%% merge with enc_to_bool */ 2211 enc_class enc_convP2B( iRegI dst, iRegP src ) %{ 2212 MacroAssembler _masm(&cbuf); 2213 2214 Register src_reg = reg_to_register_object($src$$reg); 2215 Register dst_reg = reg_to_register_object($dst$$reg); 2216 __ movr(Assembler::rc_nz, src_reg, 1, dst_reg); 2217 %} 2218 #endif 2219 2220 enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{ 2221 // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))) 2222 MacroAssembler _masm(&cbuf); 2223 2224 Register p_reg = reg_to_register_object($p$$reg); 2225 Register q_reg = reg_to_register_object($q$$reg); 2226 Register y_reg = reg_to_register_object($y$$reg); 2227 Register tmp_reg = reg_to_register_object($tmp$$reg); 2228 2229 __ subcc( p_reg, q_reg, p_reg ); 2230 __ add ( p_reg, y_reg, tmp_reg ); 2231 __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg ); 2232 %} 2233 2234 enc_class form_d2i_helper(regD src, regF dst) %{ 2235 // fcmp %fcc0,$src,$src 2236 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg ); 2237 // branch %fcc0 not-nan, predict taken 2238 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2239 // fdtoi $src,$dst 2240 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg ); 2241 // fitos $dst,$dst (if nan) 2242 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg ); 2243 // clear $dst (if nan) 2244 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg ); 2245 // carry on here... 2246 %} 2247 2248 enc_class form_d2l_helper(regD src, regD dst) %{ 2249 // fcmp %fcc0,$src,$src check for NAN 2250 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg ); 2251 // branch %fcc0 not-nan, predict taken 2252 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2253 // fdtox $src,$dst convert in delay slot 2254 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg ); 2255 // fxtod $dst,$dst (if nan) 2256 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg ); 2257 // clear $dst (if nan) 2258 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg ); 2259 // carry on here... 2260 %} 2261 2262 enc_class form_f2i_helper(regF src, regF dst) %{ 2263 // fcmps %fcc0,$src,$src 2264 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg ); 2265 // branch %fcc0 not-nan, predict taken 2266 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2267 // fstoi $src,$dst 2268 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg ); 2269 // fitos $dst,$dst (if nan) 2270 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg ); 2271 // clear $dst (if nan) 2272 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg ); 2273 // carry on here... 2274 %} 2275 2276 enc_class form_f2l_helper(regF src, regD dst) %{ 2277 // fcmps %fcc0,$src,$src 2278 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg ); 2279 // branch %fcc0 not-nan, predict taken 2280 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2281 // fstox $src,$dst 2282 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg ); 2283 // fxtod $dst,$dst (if nan) 2284 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg ); 2285 // clear $dst (if nan) 2286 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg ); 2287 // carry on here... 2288 %} 2289 2290 enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2291 enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2292 enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2293 enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2294 2295 enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %} 2296 2297 enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2298 enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %} 2299 2300 enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{ 2301 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2302 %} 2303 2304 enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{ 2305 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2306 %} 2307 2308 enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{ 2309 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2310 %} 2311 2312 enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{ 2313 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2314 %} 2315 2316 enc_class form3_convI2F(regF rs2, regF rd) %{ 2317 emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg); 2318 %} 2319 2320 // Encloding class for traceable jumps 2321 enc_class form_jmpl(g3RegP dest) %{ 2322 emit_jmpl(cbuf, $dest$$reg); 2323 %} 2324 2325 enc_class form_jmpl_set_exception_pc(g1RegP dest) %{ 2326 emit_jmpl_set_exception_pc(cbuf, $dest$$reg); 2327 %} 2328 2329 enc_class form2_nop() %{ 2330 emit_nop(cbuf); 2331 %} 2332 2333 enc_class form2_illtrap() %{ 2334 emit_illtrap(cbuf); 2335 %} 2336 2337 2338 // Compare longs and convert into -1, 0, 1. 2339 enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{ 2340 // CMP $src1,$src2 2341 emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg ); 2342 // blt,a,pn done 2343 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 ); 2344 // mov dst,-1 in delay slot 2345 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 ); 2346 // bgt,a,pn done 2347 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 ); 2348 // mov dst,1 in delay slot 2349 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 ); 2350 // CLR $dst 2351 emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 ); 2352 %} 2353 2354 enc_class enc_PartialSubtypeCheck() %{ 2355 MacroAssembler _masm(&cbuf); 2356 __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type); 2357 __ delayed()->nop(); 2358 %} 2359 2360 enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{ 2361 MacroAssembler _masm(&cbuf); 2362 Label* L = $labl$$label; 2363 Assembler::Predict predict_taken = 2364 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 2365 2366 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 2367 __ delayed()->nop(); 2368 %} 2369 2370 enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{ 2371 MacroAssembler _masm(&cbuf); 2372 Label* L = $labl$$label; 2373 Assembler::Predict predict_taken = 2374 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 2375 2376 __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L); 2377 __ delayed()->nop(); 2378 %} 2379 2380 enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{ 2381 int op = (Assembler::arith_op << 30) | 2382 ($dst$$reg << 25) | 2383 (Assembler::movcc_op3 << 19) | 2384 (1 << 18) | // cc2 bit for 'icc' 2385 ($cmp$$cmpcode << 14) | 2386 (0 << 13) | // select register move 2387 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' or 'xcc' 2388 ($src$$reg << 0); 2389 cbuf.insts()->emit_int32(op); 2390 %} 2391 2392 enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{ 2393 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits 2394 int op = (Assembler::arith_op << 30) | 2395 ($dst$$reg << 25) | 2396 (Assembler::movcc_op3 << 19) | 2397 (1 << 18) | // cc2 bit for 'icc' 2398 ($cmp$$cmpcode << 14) | 2399 (1 << 13) | // select immediate move 2400 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' 2401 (simm11 << 0); 2402 cbuf.insts()->emit_int32(op); 2403 %} 2404 2405 enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{ 2406 int op = (Assembler::arith_op << 30) | 2407 ($dst$$reg << 25) | 2408 (Assembler::movcc_op3 << 19) | 2409 (0 << 18) | // cc2 bit for 'fccX' 2410 ($cmp$$cmpcode << 14) | 2411 (0 << 13) | // select register move 2412 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 2413 ($src$$reg << 0); 2414 cbuf.insts()->emit_int32(op); 2415 %} 2416 2417 enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{ 2418 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits 2419 int op = (Assembler::arith_op << 30) | 2420 ($dst$$reg << 25) | 2421 (Assembler::movcc_op3 << 19) | 2422 (0 << 18) | // cc2 bit for 'fccX' 2423 ($cmp$$cmpcode << 14) | 2424 (1 << 13) | // select immediate move 2425 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 2426 (simm11 << 0); 2427 cbuf.insts()->emit_int32(op); 2428 %} 2429 2430 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{ 2431 int op = (Assembler::arith_op << 30) | 2432 ($dst$$reg << 25) | 2433 (Assembler::fpop2_op3 << 19) | 2434 (0 << 18) | 2435 ($cmp$$cmpcode << 14) | 2436 (1 << 13) | // select register move 2437 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' or 'xcc' 2438 ($primary << 5) | // select single, double or quad 2439 ($src$$reg << 0); 2440 cbuf.insts()->emit_int32(op); 2441 %} 2442 2443 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{ 2444 int op = (Assembler::arith_op << 30) | 2445 ($dst$$reg << 25) | 2446 (Assembler::fpop2_op3 << 19) | 2447 (0 << 18) | 2448 ($cmp$$cmpcode << 14) | 2449 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX' 2450 ($primary << 5) | // select single, double or quad 2451 ($src$$reg << 0); 2452 cbuf.insts()->emit_int32(op); 2453 %} 2454 2455 // Used by the MIN/MAX encodings. Same as a CMOV, but 2456 // the condition comes from opcode-field instead of an argument. 2457 enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{ 2458 int op = (Assembler::arith_op << 30) | 2459 ($dst$$reg << 25) | 2460 (Assembler::movcc_op3 << 19) | 2461 (1 << 18) | // cc2 bit for 'icc' 2462 ($primary << 14) | 2463 (0 << 13) | // select register move 2464 (0 << 11) | // cc1, cc0 bits for 'icc' 2465 ($src$$reg << 0); 2466 cbuf.insts()->emit_int32(op); 2467 %} 2468 2469 enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{ 2470 int op = (Assembler::arith_op << 30) | 2471 ($dst$$reg << 25) | 2472 (Assembler::movcc_op3 << 19) | 2473 (6 << 16) | // cc2 bit for 'xcc' 2474 ($primary << 14) | 2475 (0 << 13) | // select register move 2476 (0 << 11) | // cc1, cc0 bits for 'icc' 2477 ($src$$reg << 0); 2478 cbuf.insts()->emit_int32(op); 2479 %} 2480 2481 enc_class Set13( immI13 src, iRegI rd ) %{ 2482 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant ); 2483 %} 2484 2485 enc_class SetHi22( immI src, iRegI rd ) %{ 2486 emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant ); 2487 %} 2488 2489 enc_class Set32( immI src, iRegI rd ) %{ 2490 MacroAssembler _masm(&cbuf); 2491 __ set($src$$constant, reg_to_register_object($rd$$reg)); 2492 %} 2493 2494 enc_class call_epilog %{ 2495 if( VerifyStackAtCalls ) { 2496 MacroAssembler _masm(&cbuf); 2497 int framesize = ra_->C->frame_slots() << LogBytesPerInt; 2498 Register temp_reg = G3; 2499 __ add(SP, framesize, temp_reg); 2500 __ cmp(temp_reg, FP); 2501 __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc); 2502 } 2503 %} 2504 2505 // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value 2506 // to G1 so the register allocator will not have to deal with the misaligned register 2507 // pair. 2508 enc_class adjust_long_from_native_call %{ 2509 #ifndef _LP64 2510 if (returns_long()) { 2511 // sllx O0,32,O0 2512 emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 ); 2513 // srl O1,0,O1 2514 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 ); 2515 // or O0,O1,G1 2516 emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc ); 2517 } 2518 #endif 2519 %} 2520 2521 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime 2522 // CALL directly to the runtime 2523 // The user of this is responsible for ensuring that R_L7 is empty (killed). 2524 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type, 2525 /*preserve_g2=*/true); 2526 %} 2527 2528 enc_class preserve_SP %{ 2529 MacroAssembler _masm(&cbuf); 2530 __ mov(SP, L7_mh_SP_save); 2531 %} 2532 2533 enc_class restore_SP %{ 2534 MacroAssembler _masm(&cbuf); 2535 __ mov(L7_mh_SP_save, SP); 2536 %} 2537 2538 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL 2539 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 2540 // who we intended to call. 2541 if ( !_method ) { 2542 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type); 2543 } else if (_optimized_virtual) { 2544 emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type); 2545 } else { 2546 emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type); 2547 } 2548 if( _method ) { // Emit stub for static call 2549 emit_java_to_interp(cbuf); 2550 } 2551 %} 2552 2553 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL 2554 MacroAssembler _masm(&cbuf); 2555 __ set_inst_mark(); 2556 int vtable_index = this->_vtable_index; 2557 // MachCallDynamicJavaNode::ret_addr_offset uses this same test 2558 if (vtable_index < 0) { 2559 // must be invalid_vtable_index, not nonvirtual_vtable_index 2560 assert(vtable_index == methodOopDesc::invalid_vtable_index, "correct sentinel value"); 2561 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 2562 assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()"); 2563 assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub"); 2564 // !!!!! 2565 // Generate "set 0x01, R_G5", placeholder instruction to load oop-info 2566 // emit_call_dynamic_prologue( cbuf ); 2567 __ set_oop((jobject)Universe::non_oop_word(), G5_ic_reg); 2568 2569 address virtual_call_oop_addr = __ inst_mark(); 2570 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 2571 // who we intended to call. 2572 __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr)); 2573 emit_call_reloc(cbuf, $meth$$method, relocInfo::none); 2574 } else { 2575 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 2576 // Just go thru the vtable 2577 // get receiver klass (receiver already checked for non-null) 2578 // If we end up going thru a c2i adapter interpreter expects method in G5 2579 int off = __ offset(); 2580 __ load_klass(O0, G3_scratch); 2581 int klass_load_size; 2582 if (UseCompressedOops) { 2583 assert(Universe::heap() != NULL, "java heap should be initialized"); 2584 if (Universe::narrow_oop_base() == NULL) 2585 klass_load_size = 2*BytesPerInstWord; 2586 else 2587 klass_load_size = 3*BytesPerInstWord; 2588 } else { 2589 klass_load_size = 1*BytesPerInstWord; 2590 } 2591 int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 2592 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 2593 if (Assembler::is_simm13(v_off)) { 2594 __ ld_ptr(G3, v_off, G5_method); 2595 } else { 2596 // Generate 2 instructions 2597 __ Assembler::sethi(v_off & ~0x3ff, G5_method); 2598 __ or3(G5_method, v_off & 0x3ff, G5_method); 2599 // ld_ptr, set_hi, set 2600 assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord, 2601 "Unexpected instruction size(s)"); 2602 __ ld_ptr(G3, G5_method, G5_method); 2603 } 2604 // NOTE: for vtable dispatches, the vtable entry will never be null. 2605 // However it may very well end up in handle_wrong_method if the 2606 // method is abstract for the particular class. 2607 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch); 2608 // jump to target (either compiled code or c2iadapter) 2609 __ jmpl(G3_scratch, G0, O7); 2610 __ delayed()->nop(); 2611 } 2612 %} 2613 2614 enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL 2615 MacroAssembler _masm(&cbuf); 2616 2617 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 2618 Register temp_reg = G3; // caller must kill G3! We cannot reuse G5_ic_reg here because 2619 // we might be calling a C2I adapter which needs it. 2620 2621 assert(temp_reg != G5_ic_reg, "conflicting registers"); 2622 // Load nmethod 2623 __ ld_ptr(G5_ic_reg, in_bytes(methodOopDesc::from_compiled_offset()), temp_reg); 2624 2625 // CALL to compiled java, indirect the contents of G3 2626 __ set_inst_mark(); 2627 __ callr(temp_reg, G0); 2628 __ delayed()->nop(); 2629 %} 2630 2631 enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{ 2632 MacroAssembler _masm(&cbuf); 2633 Register Rdividend = reg_to_register_object($src1$$reg); 2634 Register Rdivisor = reg_to_register_object($src2$$reg); 2635 Register Rresult = reg_to_register_object($dst$$reg); 2636 2637 __ sra(Rdivisor, 0, Rdivisor); 2638 __ sra(Rdividend, 0, Rdividend); 2639 __ sdivx(Rdividend, Rdivisor, Rresult); 2640 %} 2641 2642 enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{ 2643 MacroAssembler _masm(&cbuf); 2644 2645 Register Rdividend = reg_to_register_object($src1$$reg); 2646 int divisor = $imm$$constant; 2647 Register Rresult = reg_to_register_object($dst$$reg); 2648 2649 __ sra(Rdividend, 0, Rdividend); 2650 __ sdivx(Rdividend, divisor, Rresult); 2651 %} 2652 2653 enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{ 2654 MacroAssembler _masm(&cbuf); 2655 Register Rsrc1 = reg_to_register_object($src1$$reg); 2656 Register Rsrc2 = reg_to_register_object($src2$$reg); 2657 Register Rdst = reg_to_register_object($dst$$reg); 2658 2659 __ sra( Rsrc1, 0, Rsrc1 ); 2660 __ sra( Rsrc2, 0, Rsrc2 ); 2661 __ mulx( Rsrc1, Rsrc2, Rdst ); 2662 __ srlx( Rdst, 32, Rdst ); 2663 %} 2664 2665 enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{ 2666 MacroAssembler _masm(&cbuf); 2667 Register Rdividend = reg_to_register_object($src1$$reg); 2668 Register Rdivisor = reg_to_register_object($src2$$reg); 2669 Register Rresult = reg_to_register_object($dst$$reg); 2670 Register Rscratch = reg_to_register_object($scratch$$reg); 2671 2672 assert(Rdividend != Rscratch, ""); 2673 assert(Rdivisor != Rscratch, ""); 2674 2675 __ sra(Rdividend, 0, Rdividend); 2676 __ sra(Rdivisor, 0, Rdivisor); 2677 __ sdivx(Rdividend, Rdivisor, Rscratch); 2678 __ mulx(Rscratch, Rdivisor, Rscratch); 2679 __ sub(Rdividend, Rscratch, Rresult); 2680 %} 2681 2682 enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{ 2683 MacroAssembler _masm(&cbuf); 2684 2685 Register Rdividend = reg_to_register_object($src1$$reg); 2686 int divisor = $imm$$constant; 2687 Register Rresult = reg_to_register_object($dst$$reg); 2688 Register Rscratch = reg_to_register_object($scratch$$reg); 2689 2690 assert(Rdividend != Rscratch, ""); 2691 2692 __ sra(Rdividend, 0, Rdividend); 2693 __ sdivx(Rdividend, divisor, Rscratch); 2694 __ mulx(Rscratch, divisor, Rscratch); 2695 __ sub(Rdividend, Rscratch, Rresult); 2696 %} 2697 2698 enc_class fabss (sflt_reg dst, sflt_reg src) %{ 2699 MacroAssembler _masm(&cbuf); 2700 2701 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2702 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2703 2704 __ fabs(FloatRegisterImpl::S, Fsrc, Fdst); 2705 %} 2706 2707 enc_class fabsd (dflt_reg dst, dflt_reg src) %{ 2708 MacroAssembler _masm(&cbuf); 2709 2710 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2711 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2712 2713 __ fabs(FloatRegisterImpl::D, Fsrc, Fdst); 2714 %} 2715 2716 enc_class fnegd (dflt_reg dst, dflt_reg src) %{ 2717 MacroAssembler _masm(&cbuf); 2718 2719 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2720 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2721 2722 __ fneg(FloatRegisterImpl::D, Fsrc, Fdst); 2723 %} 2724 2725 enc_class fsqrts (sflt_reg dst, sflt_reg src) %{ 2726 MacroAssembler _masm(&cbuf); 2727 2728 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2729 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2730 2731 __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst); 2732 %} 2733 2734 enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{ 2735 MacroAssembler _masm(&cbuf); 2736 2737 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2738 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2739 2740 __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst); 2741 %} 2742 2743 enc_class fmovs (dflt_reg dst, dflt_reg src) %{ 2744 MacroAssembler _masm(&cbuf); 2745 2746 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2747 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2748 2749 __ fmov(FloatRegisterImpl::S, Fsrc, Fdst); 2750 %} 2751 2752 enc_class fmovd (dflt_reg dst, dflt_reg src) %{ 2753 MacroAssembler _masm(&cbuf); 2754 2755 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2756 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2757 2758 __ fmov(FloatRegisterImpl::D, Fsrc, Fdst); 2759 %} 2760 2761 enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{ 2762 MacroAssembler _masm(&cbuf); 2763 2764 Register Roop = reg_to_register_object($oop$$reg); 2765 Register Rbox = reg_to_register_object($box$$reg); 2766 Register Rscratch = reg_to_register_object($scratch$$reg); 2767 Register Rmark = reg_to_register_object($scratch2$$reg); 2768 2769 assert(Roop != Rscratch, ""); 2770 assert(Roop != Rmark, ""); 2771 assert(Rbox != Rscratch, ""); 2772 assert(Rbox != Rmark, ""); 2773 2774 __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining); 2775 %} 2776 2777 enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{ 2778 MacroAssembler _masm(&cbuf); 2779 2780 Register Roop = reg_to_register_object($oop$$reg); 2781 Register Rbox = reg_to_register_object($box$$reg); 2782 Register Rscratch = reg_to_register_object($scratch$$reg); 2783 Register Rmark = reg_to_register_object($scratch2$$reg); 2784 2785 assert(Roop != Rscratch, ""); 2786 assert(Roop != Rmark, ""); 2787 assert(Rbox != Rscratch, ""); 2788 assert(Rbox != Rmark, ""); 2789 2790 __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining); 2791 %} 2792 2793 enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{ 2794 MacroAssembler _masm(&cbuf); 2795 Register Rmem = reg_to_register_object($mem$$reg); 2796 Register Rold = reg_to_register_object($old$$reg); 2797 Register Rnew = reg_to_register_object($new$$reg); 2798 2799 // casx_under_lock picks 1 of 3 encodings: 2800 // For 32-bit pointers you get a 32-bit CAS 2801 // For 64-bit pointers you get a 64-bit CASX 2802 __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold 2803 __ cmp( Rold, Rnew ); 2804 %} 2805 2806 enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{ 2807 Register Rmem = reg_to_register_object($mem$$reg); 2808 Register Rold = reg_to_register_object($old$$reg); 2809 Register Rnew = reg_to_register_object($new$$reg); 2810 2811 MacroAssembler _masm(&cbuf); 2812 __ mov(Rnew, O7); 2813 __ casx(Rmem, Rold, O7); 2814 __ cmp( Rold, O7 ); 2815 %} 2816 2817 // raw int cas, used for compareAndSwap 2818 enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{ 2819 Register Rmem = reg_to_register_object($mem$$reg); 2820 Register Rold = reg_to_register_object($old$$reg); 2821 Register Rnew = reg_to_register_object($new$$reg); 2822 2823 MacroAssembler _masm(&cbuf); 2824 __ mov(Rnew, O7); 2825 __ cas(Rmem, Rold, O7); 2826 __ cmp( Rold, O7 ); 2827 %} 2828 2829 enc_class enc_lflags_ne_to_boolean( iRegI res ) %{ 2830 Register Rres = reg_to_register_object($res$$reg); 2831 2832 MacroAssembler _masm(&cbuf); 2833 __ mov(1, Rres); 2834 __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres ); 2835 %} 2836 2837 enc_class enc_iflags_ne_to_boolean( iRegI res ) %{ 2838 Register Rres = reg_to_register_object($res$$reg); 2839 2840 MacroAssembler _masm(&cbuf); 2841 __ mov(1, Rres); 2842 __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres ); 2843 %} 2844 2845 enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{ 2846 MacroAssembler _masm(&cbuf); 2847 Register Rdst = reg_to_register_object($dst$$reg); 2848 FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg) 2849 : reg_to_DoubleFloatRegister_object($src1$$reg); 2850 FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg) 2851 : reg_to_DoubleFloatRegister_object($src2$$reg); 2852 2853 // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1) 2854 __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst); 2855 %} 2856 2857 2858 enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{ 2859 Label Ldone, Lloop; 2860 MacroAssembler _masm(&cbuf); 2861 2862 Register str1_reg = reg_to_register_object($str1$$reg); 2863 Register str2_reg = reg_to_register_object($str2$$reg); 2864 Register cnt1_reg = reg_to_register_object($cnt1$$reg); 2865 Register cnt2_reg = reg_to_register_object($cnt2$$reg); 2866 Register result_reg = reg_to_register_object($result$$reg); 2867 2868 assert(result_reg != str1_reg && 2869 result_reg != str2_reg && 2870 result_reg != cnt1_reg && 2871 result_reg != cnt2_reg , 2872 "need different registers"); 2873 2874 // Compute the minimum of the string lengths(str1_reg) and the 2875 // difference of the string lengths (stack) 2876 2877 // See if the lengths are different, and calculate min in str1_reg. 2878 // Stash diff in O7 in case we need it for a tie-breaker. 2879 Label Lskip; 2880 __ subcc(cnt1_reg, cnt2_reg, O7); 2881 __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit 2882 __ br(Assembler::greater, true, Assembler::pt, Lskip); 2883 // cnt2 is shorter, so use its count: 2884 __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit 2885 __ bind(Lskip); 2886 2887 // reallocate cnt1_reg, cnt2_reg, result_reg 2888 // Note: limit_reg holds the string length pre-scaled by 2 2889 Register limit_reg = cnt1_reg; 2890 Register chr2_reg = cnt2_reg; 2891 Register chr1_reg = result_reg; 2892 // str{12} are the base pointers 2893 2894 // Is the minimum length zero? 2895 __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity 2896 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2897 __ delayed()->mov(O7, result_reg); // result is difference in lengths 2898 2899 // Load first characters 2900 __ lduh(str1_reg, 0, chr1_reg); 2901 __ lduh(str2_reg, 0, chr2_reg); 2902 2903 // Compare first characters 2904 __ subcc(chr1_reg, chr2_reg, chr1_reg); 2905 __ br(Assembler::notZero, false, Assembler::pt, Ldone); 2906 assert(chr1_reg == result_reg, "result must be pre-placed"); 2907 __ delayed()->nop(); 2908 2909 { 2910 // Check after comparing first character to see if strings are equivalent 2911 Label LSkip2; 2912 // Check if the strings start at same location 2913 __ cmp(str1_reg, str2_reg); 2914 __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2); 2915 __ delayed()->nop(); 2916 2917 // Check if the length difference is zero (in O7) 2918 __ cmp(G0, O7); 2919 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2920 __ delayed()->mov(G0, result_reg); // result is zero 2921 2922 // Strings might not be equal 2923 __ bind(LSkip2); 2924 } 2925 2926 __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg); 2927 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2928 __ delayed()->mov(O7, result_reg); // result is difference in lengths 2929 2930 // Shift str1_reg and str2_reg to the end of the arrays, negate limit 2931 __ add(str1_reg, limit_reg, str1_reg); 2932 __ add(str2_reg, limit_reg, str2_reg); 2933 __ neg(chr1_reg, limit_reg); // limit = -(limit-2) 2934 2935 // Compare the rest of the characters 2936 __ lduh(str1_reg, limit_reg, chr1_reg); 2937 __ bind(Lloop); 2938 // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted 2939 __ lduh(str2_reg, limit_reg, chr2_reg); 2940 __ subcc(chr1_reg, chr2_reg, chr1_reg); 2941 __ br(Assembler::notZero, false, Assembler::pt, Ldone); 2942 assert(chr1_reg == result_reg, "result must be pre-placed"); 2943 __ delayed()->inccc(limit_reg, sizeof(jchar)); 2944 // annul LDUH if branch is not taken to prevent access past end of string 2945 __ br(Assembler::notZero, true, Assembler::pt, Lloop); 2946 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted 2947 2948 // If strings are equal up to min length, return the length difference. 2949 __ mov(O7, result_reg); 2950 2951 // Otherwise, return the difference between the first mismatched chars. 2952 __ bind(Ldone); 2953 %} 2954 2955 enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{ 2956 Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone; 2957 MacroAssembler _masm(&cbuf); 2958 2959 Register str1_reg = reg_to_register_object($str1$$reg); 2960 Register str2_reg = reg_to_register_object($str2$$reg); 2961 Register cnt_reg = reg_to_register_object($cnt$$reg); 2962 Register tmp1_reg = O7; 2963 Register result_reg = reg_to_register_object($result$$reg); 2964 2965 assert(result_reg != str1_reg && 2966 result_reg != str2_reg && 2967 result_reg != cnt_reg && 2968 result_reg != tmp1_reg , 2969 "need different registers"); 2970 2971 __ cmp(str1_reg, str2_reg); //same char[] ? 2972 __ brx(Assembler::equal, true, Assembler::pn, Ldone); 2973 __ delayed()->add(G0, 1, result_reg); 2974 2975 __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn); 2976 __ delayed()->add(G0, 1, result_reg); // count == 0 2977 2978 //rename registers 2979 Register limit_reg = cnt_reg; 2980 Register chr1_reg = result_reg; 2981 Register chr2_reg = tmp1_reg; 2982 2983 //check for alignment and position the pointers to the ends 2984 __ or3(str1_reg, str2_reg, chr1_reg); 2985 __ andcc(chr1_reg, 0x3, chr1_reg); 2986 // notZero means at least one not 4-byte aligned. 2987 // We could optimize the case when both arrays are not aligned 2988 // but it is not frequent case and it requires additional checks. 2989 __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare 2990 __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count 2991 2992 // Compare char[] arrays aligned to 4 bytes. 2993 __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg, 2994 chr1_reg, chr2_reg, Ldone); 2995 __ ba(Ldone); 2996 __ delayed()->add(G0, 1, result_reg); 2997 2998 // char by char compare 2999 __ bind(Lchar); 3000 __ add(str1_reg, limit_reg, str1_reg); 3001 __ add(str2_reg, limit_reg, str2_reg); 3002 __ neg(limit_reg); //negate count 3003 3004 __ lduh(str1_reg, limit_reg, chr1_reg); 3005 // Lchar_loop 3006 __ bind(Lchar_loop); 3007 __ lduh(str2_reg, limit_reg, chr2_reg); 3008 __ cmp(chr1_reg, chr2_reg); 3009 __ br(Assembler::notEqual, true, Assembler::pt, Ldone); 3010 __ delayed()->mov(G0, result_reg); //not equal 3011 __ inccc(limit_reg, sizeof(jchar)); 3012 // annul LDUH if branch is not taken to prevent access past end of string 3013 __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop); 3014 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted 3015 3016 __ add(G0, 1, result_reg); //equal 3017 3018 __ bind(Ldone); 3019 %} 3020 3021 enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{ 3022 Label Lvector, Ldone, Lloop; 3023 MacroAssembler _masm(&cbuf); 3024 3025 Register ary1_reg = reg_to_register_object($ary1$$reg); 3026 Register ary2_reg = reg_to_register_object($ary2$$reg); 3027 Register tmp1_reg = reg_to_register_object($tmp1$$reg); 3028 Register tmp2_reg = O7; 3029 Register result_reg = reg_to_register_object($result$$reg); 3030 3031 int length_offset = arrayOopDesc::length_offset_in_bytes(); 3032 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); 3033 3034 // return true if the same array 3035 __ cmp(ary1_reg, ary2_reg); 3036 __ brx(Assembler::equal, true, Assembler::pn, Ldone); 3037 __ delayed()->add(G0, 1, result_reg); // equal 3038 3039 __ br_null(ary1_reg, true, Assembler::pn, Ldone); 3040 __ delayed()->mov(G0, result_reg); // not equal 3041 3042 __ br_null(ary2_reg, true, Assembler::pn, Ldone); 3043 __ delayed()->mov(G0, result_reg); // not equal 3044 3045 //load the lengths of arrays 3046 __ ld(Address(ary1_reg, length_offset), tmp1_reg); 3047 __ ld(Address(ary2_reg, length_offset), tmp2_reg); 3048 3049 // return false if the two arrays are not equal length 3050 __ cmp(tmp1_reg, tmp2_reg); 3051 __ br(Assembler::notEqual, true, Assembler::pn, Ldone); 3052 __ delayed()->mov(G0, result_reg); // not equal 3053 3054 __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn); 3055 __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal 3056 3057 // load array addresses 3058 __ add(ary1_reg, base_offset, ary1_reg); 3059 __ add(ary2_reg, base_offset, ary2_reg); 3060 3061 // renaming registers 3062 Register chr1_reg = result_reg; // for characters in ary1 3063 Register chr2_reg = tmp2_reg; // for characters in ary2 3064 Register limit_reg = tmp1_reg; // length 3065 3066 // set byte count 3067 __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); 3068 3069 // Compare char[] arrays aligned to 4 bytes. 3070 __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg, 3071 chr1_reg, chr2_reg, Ldone); 3072 __ add(G0, 1, result_reg); // equals 3073 3074 __ bind(Ldone); 3075 %} 3076 3077 enc_class enc_rethrow() %{ 3078 cbuf.set_insts_mark(); 3079 Register temp_reg = G3; 3080 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub()); 3081 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg"); 3082 MacroAssembler _masm(&cbuf); 3083 #ifdef ASSERT 3084 __ save_frame(0); 3085 AddressLiteral last_rethrow_addrlit(&last_rethrow); 3086 __ sethi(last_rethrow_addrlit, L1); 3087 Address addr(L1, last_rethrow_addrlit.low10()); 3088 __ get_pc(L2); 3089 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to 3090 __ st_ptr(L2, addr); 3091 __ restore(); 3092 #endif 3093 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp 3094 __ delayed()->nop(); 3095 %} 3096 3097 enc_class emit_mem_nop() %{ 3098 // Generates the instruction LDUXA [o6,g0],#0x82,g0 3099 cbuf.insts()->emit_int32((unsigned int) 0xc0839040); 3100 %} 3101 3102 enc_class emit_fadd_nop() %{ 3103 // Generates the instruction FMOVS f31,f31 3104 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f); 3105 %} 3106 3107 enc_class emit_br_nop() %{ 3108 // Generates the instruction BPN,PN . 3109 cbuf.insts()->emit_int32((unsigned int) 0x00400000); 3110 %} 3111 3112 enc_class enc_membar_acquire %{ 3113 MacroAssembler _masm(&cbuf); 3114 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) ); 3115 %} 3116 3117 enc_class enc_membar_release %{ 3118 MacroAssembler _masm(&cbuf); 3119 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) ); 3120 %} 3121 3122 enc_class enc_membar_volatile %{ 3123 MacroAssembler _masm(&cbuf); 3124 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); 3125 %} 3126 3127 enc_class enc_repl8b( iRegI src, iRegL dst ) %{ 3128 MacroAssembler _masm(&cbuf); 3129 Register src_reg = reg_to_register_object($src$$reg); 3130 Register dst_reg = reg_to_register_object($dst$$reg); 3131 __ sllx(src_reg, 56, dst_reg); 3132 __ srlx(dst_reg, 8, O7); 3133 __ or3 (dst_reg, O7, dst_reg); 3134 __ srlx(dst_reg, 16, O7); 3135 __ or3 (dst_reg, O7, dst_reg); 3136 __ srlx(dst_reg, 32, O7); 3137 __ or3 (dst_reg, O7, dst_reg); 3138 %} 3139 3140 enc_class enc_repl4b( iRegI src, iRegL dst ) %{ 3141 MacroAssembler _masm(&cbuf); 3142 Register src_reg = reg_to_register_object($src$$reg); 3143 Register dst_reg = reg_to_register_object($dst$$reg); 3144 __ sll(src_reg, 24, dst_reg); 3145 __ srl(dst_reg, 8, O7); 3146 __ or3(dst_reg, O7, dst_reg); 3147 __ srl(dst_reg, 16, O7); 3148 __ or3(dst_reg, O7, dst_reg); 3149 %} 3150 3151 enc_class enc_repl4s( iRegI src, iRegL dst ) %{ 3152 MacroAssembler _masm(&cbuf); 3153 Register src_reg = reg_to_register_object($src$$reg); 3154 Register dst_reg = reg_to_register_object($dst$$reg); 3155 __ sllx(src_reg, 48, dst_reg); 3156 __ srlx(dst_reg, 16, O7); 3157 __ or3 (dst_reg, O7, dst_reg); 3158 __ srlx(dst_reg, 32, O7); 3159 __ or3 (dst_reg, O7, dst_reg); 3160 %} 3161 3162 enc_class enc_repl2i( iRegI src, iRegL dst ) %{ 3163 MacroAssembler _masm(&cbuf); 3164 Register src_reg = reg_to_register_object($src$$reg); 3165 Register dst_reg = reg_to_register_object($dst$$reg); 3166 __ sllx(src_reg, 32, dst_reg); 3167 __ srlx(dst_reg, 32, O7); 3168 __ or3 (dst_reg, O7, dst_reg); 3169 %} 3170 3171 %} 3172 3173 //----------FRAME-------------------------------------------------------------- 3174 // Definition of frame structure and management information. 3175 // 3176 // S T A C K L A Y O U T Allocators stack-slot number 3177 // | (to get allocators register number 3178 // G Owned by | | v add VMRegImpl::stack0) 3179 // r CALLER | | 3180 // o | +--------+ pad to even-align allocators stack-slot 3181 // w V | pad0 | numbers; owned by CALLER 3182 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned 3183 // h ^ | in | 5 3184 // | | args | 4 Holes in incoming args owned by SELF 3185 // | | | | 3 3186 // | | +--------+ 3187 // V | | old out| Empty on Intel, window on Sparc 3188 // | old |preserve| Must be even aligned. 3189 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned 3190 // | | in | 3 area for Intel ret address 3191 // Owned by |preserve| Empty on Sparc. 3192 // SELF +--------+ 3193 // | | pad2 | 2 pad to align old SP 3194 // | +--------+ 1 3195 // | | locks | 0 3196 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned 3197 // | | pad1 | 11 pad to align new SP 3198 // | +--------+ 3199 // | | | 10 3200 // | | spills | 9 spills 3201 // V | | 8 (pad0 slot for callee) 3202 // -----------+--------+----> Matcher::_out_arg_limit, unaligned 3203 // ^ | out | 7 3204 // | | args | 6 Holes in outgoing args owned by CALLEE 3205 // Owned by +--------+ 3206 // CALLEE | new out| 6 Empty on Intel, window on Sparc 3207 // | new |preserve| Must be even-aligned. 3208 // | SP-+--------+----> Matcher::_new_SP, even aligned 3209 // | | | 3210 // 3211 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is 3212 // known from SELF's arguments and the Java calling convention. 3213 // Region 6-7 is determined per call site. 3214 // Note 2: If the calling convention leaves holes in the incoming argument 3215 // area, those holes are owned by SELF. Holes in the outgoing area 3216 // are owned by the CALLEE. Holes should not be nessecary in the 3217 // incoming area, as the Java calling convention is completely under 3218 // the control of the AD file. Doubles can be sorted and packed to 3219 // avoid holes. Holes in the outgoing arguments may be nessecary for 3220 // varargs C calling conventions. 3221 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is 3222 // even aligned with pad0 as needed. 3223 // Region 6 is even aligned. Region 6-7 is NOT even aligned; 3224 // region 6-11 is even aligned; it may be padded out more so that 3225 // the region from SP to FP meets the minimum stack alignment. 3226 3227 frame %{ 3228 // What direction does stack grow in (assumed to be same for native & Java) 3229 stack_direction(TOWARDS_LOW); 3230 3231 // These two registers define part of the calling convention 3232 // between compiled code and the interpreter. 3233 inline_cache_reg(R_G5); // Inline Cache Register or methodOop for I2C 3234 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter 3235 3236 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset] 3237 cisc_spilling_operand_name(indOffset); 3238 3239 // Number of stack slots consumed by a Monitor enter 3240 #ifdef _LP64 3241 sync_stack_slots(2); 3242 #else 3243 sync_stack_slots(1); 3244 #endif 3245 3246 // Compiled code's Frame Pointer 3247 frame_pointer(R_SP); 3248 3249 // Stack alignment requirement 3250 stack_alignment(StackAlignmentInBytes); 3251 // LP64: Alignment size in bytes (128-bit -> 16 bytes) 3252 // !LP64: Alignment size in bytes (64-bit -> 8 bytes) 3253 3254 // Number of stack slots between incoming argument block and the start of 3255 // a new frame. The PROLOG must add this many slots to the stack. The 3256 // EPILOG must remove this many slots. 3257 in_preserve_stack_slots(0); 3258 3259 // Number of outgoing stack slots killed above the out_preserve_stack_slots 3260 // for calls to C. Supports the var-args backing area for register parms. 3261 // ADLC doesn't support parsing expressions, so I folded the math by hand. 3262 #ifdef _LP64 3263 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word 3264 varargs_C_out_slots_killed(12); 3265 #else 3266 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word 3267 varargs_C_out_slots_killed( 7); 3268 #endif 3269 3270 // The after-PROLOG location of the return address. Location of 3271 // return address specifies a type (REG or STACK) and a number 3272 // representing the register number (i.e. - use a register name) or 3273 // stack slot. 3274 return_addr(REG R_I7); // Ret Addr is in register I7 3275 3276 // Body of function which returns an OptoRegs array locating 3277 // arguments either in registers or in stack slots for calling 3278 // java 3279 calling_convention %{ 3280 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing); 3281 3282 %} 3283 3284 // Body of function which returns an OptoRegs array locating 3285 // arguments either in registers or in stack slots for callin 3286 // C. 3287 c_calling_convention %{ 3288 // This is obviously always outgoing 3289 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length); 3290 %} 3291 3292 // Location of native (C/C++) and interpreter return values. This is specified to 3293 // be the same as Java. In the 32-bit VM, long values are actually returned from 3294 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying 3295 // to and from the register pairs is done by the appropriate call and epilog 3296 // opcodes. This simplifies the register allocator. 3297 c_return_value %{ 3298 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); 3299 #ifdef _LP64 3300 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; 3301 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; 3302 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; 3303 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; 3304 #else // !_LP64 3305 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; 3306 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; 3307 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; 3308 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; 3309 #endif 3310 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], 3311 (is_outgoing?lo_out:lo_in)[ideal_reg] ); 3312 %} 3313 3314 // Location of compiled Java return values. Same as C 3315 return_value %{ 3316 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); 3317 #ifdef _LP64 3318 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; 3319 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; 3320 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; 3321 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; 3322 #else // !_LP64 3323 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; 3324 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3325 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; 3326 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3327 #endif 3328 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], 3329 (is_outgoing?lo_out:lo_in)[ideal_reg] ); 3330 %} 3331 3332 %} 3333 3334 3335 //----------ATTRIBUTES--------------------------------------------------------- 3336 //----------Operand Attributes------------------------------------------------- 3337 op_attrib op_cost(1); // Required cost attribute 3338 3339 //----------Instruction Attributes--------------------------------------------- 3340 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute 3341 ins_attrib ins_size(32); // Required size attribute (in bits) 3342 ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back 3343 ins_attrib ins_short_branch(0); // Required flag: is this instruction a 3344 // non-matching short branch variant of some 3345 // long branch? 3346 3347 //----------OPERANDS----------------------------------------------------------- 3348 // Operand definitions must precede instruction definitions for correct parsing 3349 // in the ADLC because operands constitute user defined types which are used in 3350 // instruction definitions. 3351 3352 //----------Simple Operands---------------------------------------------------- 3353 // Immediate Operands 3354 // Integer Immediate: 32-bit 3355 operand immI() %{ 3356 match(ConI); 3357 3358 op_cost(0); 3359 // formats are generated automatically for constants and base registers 3360 format %{ %} 3361 interface(CONST_INTER); 3362 %} 3363 3364 // Integer Immediate: 8-bit 3365 operand immI8() %{ 3366 predicate(Assembler::is_simm8(n->get_int())); 3367 match(ConI); 3368 op_cost(0); 3369 format %{ %} 3370 interface(CONST_INTER); 3371 %} 3372 3373 // Integer Immediate: 13-bit 3374 operand immI13() %{ 3375 predicate(Assembler::is_simm13(n->get_int())); 3376 match(ConI); 3377 op_cost(0); 3378 3379 format %{ %} 3380 interface(CONST_INTER); 3381 %} 3382 3383 // Integer Immediate: 13-bit minus 7 3384 operand immI13m7() %{ 3385 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095)); 3386 match(ConI); 3387 op_cost(0); 3388 3389 format %{ %} 3390 interface(CONST_INTER); 3391 %} 3392 3393 // Integer Immediate: 16-bit 3394 operand immI16() %{ 3395 predicate(Assembler::is_simm16(n->get_int())); 3396 match(ConI); 3397 op_cost(0); 3398 format %{ %} 3399 interface(CONST_INTER); 3400 %} 3401 3402 // Unsigned (positive) Integer Immediate: 13-bit 3403 operand immU13() %{ 3404 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int())); 3405 match(ConI); 3406 op_cost(0); 3407 3408 format %{ %} 3409 interface(CONST_INTER); 3410 %} 3411 3412 // Integer Immediate: 6-bit 3413 operand immU6() %{ 3414 predicate(n->get_int() >= 0 && n->get_int() <= 63); 3415 match(ConI); 3416 op_cost(0); 3417 format %{ %} 3418 interface(CONST_INTER); 3419 %} 3420 3421 // Integer Immediate: 11-bit 3422 operand immI11() %{ 3423 predicate(Assembler::is_simm11(n->get_int())); 3424 match(ConI); 3425 op_cost(0); 3426 format %{ %} 3427 interface(CONST_INTER); 3428 %} 3429 3430 // Integer Immediate: 5-bit 3431 operand immI5() %{ 3432 predicate(Assembler::is_simm5(n->get_int())); 3433 match(ConI); 3434 op_cost(0); 3435 format %{ %} 3436 interface(CONST_INTER); 3437 %} 3438 3439 // Integer Immediate: 0-bit 3440 operand immI0() %{ 3441 predicate(n->get_int() == 0); 3442 match(ConI); 3443 op_cost(0); 3444 3445 format %{ %} 3446 interface(CONST_INTER); 3447 %} 3448 3449 // Integer Immediate: the value 10 3450 operand immI10() %{ 3451 predicate(n->get_int() == 10); 3452 match(ConI); 3453 op_cost(0); 3454 3455 format %{ %} 3456 interface(CONST_INTER); 3457 %} 3458 3459 // Integer Immediate: the values 0-31 3460 operand immU5() %{ 3461 predicate(n->get_int() >= 0 && n->get_int() <= 31); 3462 match(ConI); 3463 op_cost(0); 3464 3465 format %{ %} 3466 interface(CONST_INTER); 3467 %} 3468 3469 // Integer Immediate: the values 1-31 3470 operand immI_1_31() %{ 3471 predicate(n->get_int() >= 1 && n->get_int() <= 31); 3472 match(ConI); 3473 op_cost(0); 3474 3475 format %{ %} 3476 interface(CONST_INTER); 3477 %} 3478 3479 // Integer Immediate: the values 32-63 3480 operand immI_32_63() %{ 3481 predicate(n->get_int() >= 32 && n->get_int() <= 63); 3482 match(ConI); 3483 op_cost(0); 3484 3485 format %{ %} 3486 interface(CONST_INTER); 3487 %} 3488 3489 // Immediates for special shifts (sign extend) 3490 3491 // Integer Immediate: the value 16 3492 operand immI_16() %{ 3493 predicate(n->get_int() == 16); 3494 match(ConI); 3495 op_cost(0); 3496 3497 format %{ %} 3498 interface(CONST_INTER); 3499 %} 3500 3501 // Integer Immediate: the value 24 3502 operand immI_24() %{ 3503 predicate(n->get_int() == 24); 3504 match(ConI); 3505 op_cost(0); 3506 3507 format %{ %} 3508 interface(CONST_INTER); 3509 %} 3510 3511 // Integer Immediate: the value 255 3512 operand immI_255() %{ 3513 predicate( n->get_int() == 255 ); 3514 match(ConI); 3515 op_cost(0); 3516 3517 format %{ %} 3518 interface(CONST_INTER); 3519 %} 3520 3521 // Integer Immediate: the value 65535 3522 operand immI_65535() %{ 3523 predicate(n->get_int() == 65535); 3524 match(ConI); 3525 op_cost(0); 3526 3527 format %{ %} 3528 interface(CONST_INTER); 3529 %} 3530 3531 // Long Immediate: the value FF 3532 operand immL_FF() %{ 3533 predicate( n->get_long() == 0xFFL ); 3534 match(ConL); 3535 op_cost(0); 3536 3537 format %{ %} 3538 interface(CONST_INTER); 3539 %} 3540 3541 // Long Immediate: the value FFFF 3542 operand immL_FFFF() %{ 3543 predicate( n->get_long() == 0xFFFFL ); 3544 match(ConL); 3545 op_cost(0); 3546 3547 format %{ %} 3548 interface(CONST_INTER); 3549 %} 3550 3551 // Pointer Immediate: 32 or 64-bit 3552 operand immP() %{ 3553 match(ConP); 3554 3555 op_cost(5); 3556 // formats are generated automatically for constants and base registers 3557 format %{ %} 3558 interface(CONST_INTER); 3559 %} 3560 3561 #ifdef _LP64 3562 // Pointer Immediate: 64-bit 3563 operand immP_set() %{ 3564 predicate(!VM_Version::is_niagara_plus()); 3565 match(ConP); 3566 3567 op_cost(5); 3568 // formats are generated automatically for constants and base registers 3569 format %{ %} 3570 interface(CONST_INTER); 3571 %} 3572 3573 // Pointer Immediate: 64-bit 3574 // From Niagara2 processors on a load should be better than materializing. 3575 operand immP_load() %{ 3576 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3))); 3577 match(ConP); 3578 3579 op_cost(5); 3580 // formats are generated automatically for constants and base registers 3581 format %{ %} 3582 interface(CONST_INTER); 3583 %} 3584 3585 // Pointer Immediate: 64-bit 3586 operand immP_no_oop_cheap() %{ 3587 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3)); 3588 match(ConP); 3589 3590 op_cost(5); 3591 // formats are generated automatically for constants and base registers 3592 format %{ %} 3593 interface(CONST_INTER); 3594 %} 3595 #endif 3596 3597 operand immP13() %{ 3598 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095)); 3599 match(ConP); 3600 op_cost(0); 3601 3602 format %{ %} 3603 interface(CONST_INTER); 3604 %} 3605 3606 operand immP0() %{ 3607 predicate(n->get_ptr() == 0); 3608 match(ConP); 3609 op_cost(0); 3610 3611 format %{ %} 3612 interface(CONST_INTER); 3613 %} 3614 3615 operand immP_poll() %{ 3616 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); 3617 match(ConP); 3618 3619 // formats are generated automatically for constants and base registers 3620 format %{ %} 3621 interface(CONST_INTER); 3622 %} 3623 3624 // Pointer Immediate 3625 operand immN() 3626 %{ 3627 match(ConN); 3628 3629 op_cost(10); 3630 format %{ %} 3631 interface(CONST_INTER); 3632 %} 3633 3634 // NULL Pointer Immediate 3635 operand immN0() 3636 %{ 3637 predicate(n->get_narrowcon() == 0); 3638 match(ConN); 3639 3640 op_cost(0); 3641 format %{ %} 3642 interface(CONST_INTER); 3643 %} 3644 3645 operand immL() %{ 3646 match(ConL); 3647 op_cost(40); 3648 // formats are generated automatically for constants and base registers 3649 format %{ %} 3650 interface(CONST_INTER); 3651 %} 3652 3653 operand immL0() %{ 3654 predicate(n->get_long() == 0L); 3655 match(ConL); 3656 op_cost(0); 3657 // formats are generated automatically for constants and base registers 3658 format %{ %} 3659 interface(CONST_INTER); 3660 %} 3661 3662 // Integer Immediate: 5-bit 3663 operand immL5() %{ 3664 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long())); 3665 match(ConL); 3666 op_cost(0); 3667 format %{ %} 3668 interface(CONST_INTER); 3669 %} 3670 3671 // Long Immediate: 13-bit 3672 operand immL13() %{ 3673 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L)); 3674 match(ConL); 3675 op_cost(0); 3676 3677 format %{ %} 3678 interface(CONST_INTER); 3679 %} 3680 3681 // Long Immediate: 13-bit minus 7 3682 operand immL13m7() %{ 3683 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L)); 3684 match(ConL); 3685 op_cost(0); 3686 3687 format %{ %} 3688 interface(CONST_INTER); 3689 %} 3690 3691 // Long Immediate: low 32-bit mask 3692 operand immL_32bits() %{ 3693 predicate(n->get_long() == 0xFFFFFFFFL); 3694 match(ConL); 3695 op_cost(0); 3696 3697 format %{ %} 3698 interface(CONST_INTER); 3699 %} 3700 3701 // Long Immediate: cheap (materialize in <= 3 instructions) 3702 operand immL_cheap() %{ 3703 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3); 3704 match(ConL); 3705 op_cost(0); 3706 3707 format %{ %} 3708 interface(CONST_INTER); 3709 %} 3710 3711 // Long Immediate: expensive (materialize in > 3 instructions) 3712 operand immL_expensive() %{ 3713 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3); 3714 match(ConL); 3715 op_cost(0); 3716 3717 format %{ %} 3718 interface(CONST_INTER); 3719 %} 3720 3721 // Double Immediate 3722 operand immD() %{ 3723 match(ConD); 3724 3725 op_cost(40); 3726 format %{ %} 3727 interface(CONST_INTER); 3728 %} 3729 3730 operand immD0() %{ 3731 #ifdef _LP64 3732 // on 64-bit architectures this comparision is faster 3733 predicate(jlong_cast(n->getd()) == 0); 3734 #else 3735 predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO)); 3736 #endif 3737 match(ConD); 3738 3739 op_cost(0); 3740 format %{ %} 3741 interface(CONST_INTER); 3742 %} 3743 3744 // Float Immediate 3745 operand immF() %{ 3746 match(ConF); 3747 3748 op_cost(20); 3749 format %{ %} 3750 interface(CONST_INTER); 3751 %} 3752 3753 // Float Immediate: 0 3754 operand immF0() %{ 3755 predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO)); 3756 match(ConF); 3757 3758 op_cost(0); 3759 format %{ %} 3760 interface(CONST_INTER); 3761 %} 3762 3763 // Integer Register Operands 3764 // Integer Register 3765 operand iRegI() %{ 3766 constraint(ALLOC_IN_RC(int_reg)); 3767 match(RegI); 3768 3769 match(notemp_iRegI); 3770 match(g1RegI); 3771 match(o0RegI); 3772 match(iRegIsafe); 3773 3774 format %{ %} 3775 interface(REG_INTER); 3776 %} 3777 3778 operand notemp_iRegI() %{ 3779 constraint(ALLOC_IN_RC(notemp_int_reg)); 3780 match(RegI); 3781 3782 match(o0RegI); 3783 3784 format %{ %} 3785 interface(REG_INTER); 3786 %} 3787 3788 operand o0RegI() %{ 3789 constraint(ALLOC_IN_RC(o0_regI)); 3790 match(iRegI); 3791 3792 format %{ %} 3793 interface(REG_INTER); 3794 %} 3795 3796 // Pointer Register 3797 operand iRegP() %{ 3798 constraint(ALLOC_IN_RC(ptr_reg)); 3799 match(RegP); 3800 3801 match(lock_ptr_RegP); 3802 match(g1RegP); 3803 match(g2RegP); 3804 match(g3RegP); 3805 match(g4RegP); 3806 match(i0RegP); 3807 match(o0RegP); 3808 match(o1RegP); 3809 match(l7RegP); 3810 3811 format %{ %} 3812 interface(REG_INTER); 3813 %} 3814 3815 operand sp_ptr_RegP() %{ 3816 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3817 match(RegP); 3818 match(iRegP); 3819 3820 format %{ %} 3821 interface(REG_INTER); 3822 %} 3823 3824 operand lock_ptr_RegP() %{ 3825 constraint(ALLOC_IN_RC(lock_ptr_reg)); 3826 match(RegP); 3827 match(i0RegP); 3828 match(o0RegP); 3829 match(o1RegP); 3830 match(l7RegP); 3831 3832 format %{ %} 3833 interface(REG_INTER); 3834 %} 3835 3836 operand g1RegP() %{ 3837 constraint(ALLOC_IN_RC(g1_regP)); 3838 match(iRegP); 3839 3840 format %{ %} 3841 interface(REG_INTER); 3842 %} 3843 3844 operand g2RegP() %{ 3845 constraint(ALLOC_IN_RC(g2_regP)); 3846 match(iRegP); 3847 3848 format %{ %} 3849 interface(REG_INTER); 3850 %} 3851 3852 operand g3RegP() %{ 3853 constraint(ALLOC_IN_RC(g3_regP)); 3854 match(iRegP); 3855 3856 format %{ %} 3857 interface(REG_INTER); 3858 %} 3859 3860 operand g1RegI() %{ 3861 constraint(ALLOC_IN_RC(g1_regI)); 3862 match(iRegI); 3863 3864 format %{ %} 3865 interface(REG_INTER); 3866 %} 3867 3868 operand g3RegI() %{ 3869 constraint(ALLOC_IN_RC(g3_regI)); 3870 match(iRegI); 3871 3872 format %{ %} 3873 interface(REG_INTER); 3874 %} 3875 3876 operand g4RegI() %{ 3877 constraint(ALLOC_IN_RC(g4_regI)); 3878 match(iRegI); 3879 3880 format %{ %} 3881 interface(REG_INTER); 3882 %} 3883 3884 operand g4RegP() %{ 3885 constraint(ALLOC_IN_RC(g4_regP)); 3886 match(iRegP); 3887 3888 format %{ %} 3889 interface(REG_INTER); 3890 %} 3891 3892 operand i0RegP() %{ 3893 constraint(ALLOC_IN_RC(i0_regP)); 3894 match(iRegP); 3895 3896 format %{ %} 3897 interface(REG_INTER); 3898 %} 3899 3900 operand o0RegP() %{ 3901 constraint(ALLOC_IN_RC(o0_regP)); 3902 match(iRegP); 3903 3904 format %{ %} 3905 interface(REG_INTER); 3906 %} 3907 3908 operand o1RegP() %{ 3909 constraint(ALLOC_IN_RC(o1_regP)); 3910 match(iRegP); 3911 3912 format %{ %} 3913 interface(REG_INTER); 3914 %} 3915 3916 operand o2RegP() %{ 3917 constraint(ALLOC_IN_RC(o2_regP)); 3918 match(iRegP); 3919 3920 format %{ %} 3921 interface(REG_INTER); 3922 %} 3923 3924 operand o7RegP() %{ 3925 constraint(ALLOC_IN_RC(o7_regP)); 3926 match(iRegP); 3927 3928 format %{ %} 3929 interface(REG_INTER); 3930 %} 3931 3932 operand l7RegP() %{ 3933 constraint(ALLOC_IN_RC(l7_regP)); 3934 match(iRegP); 3935 3936 format %{ %} 3937 interface(REG_INTER); 3938 %} 3939 3940 operand o7RegI() %{ 3941 constraint(ALLOC_IN_RC(o7_regI)); 3942 match(iRegI); 3943 3944 format %{ %} 3945 interface(REG_INTER); 3946 %} 3947 3948 operand iRegN() %{ 3949 constraint(ALLOC_IN_RC(int_reg)); 3950 match(RegN); 3951 3952 format %{ %} 3953 interface(REG_INTER); 3954 %} 3955 3956 // Long Register 3957 operand iRegL() %{ 3958 constraint(ALLOC_IN_RC(long_reg)); 3959 match(RegL); 3960 3961 format %{ %} 3962 interface(REG_INTER); 3963 %} 3964 3965 operand o2RegL() %{ 3966 constraint(ALLOC_IN_RC(o2_regL)); 3967 match(iRegL); 3968 3969 format %{ %} 3970 interface(REG_INTER); 3971 %} 3972 3973 operand o7RegL() %{ 3974 constraint(ALLOC_IN_RC(o7_regL)); 3975 match(iRegL); 3976 3977 format %{ %} 3978 interface(REG_INTER); 3979 %} 3980 3981 operand g1RegL() %{ 3982 constraint(ALLOC_IN_RC(g1_regL)); 3983 match(iRegL); 3984 3985 format %{ %} 3986 interface(REG_INTER); 3987 %} 3988 3989 operand g3RegL() %{ 3990 constraint(ALLOC_IN_RC(g3_regL)); 3991 match(iRegL); 3992 3993 format %{ %} 3994 interface(REG_INTER); 3995 %} 3996 3997 // Int Register safe 3998 // This is 64bit safe 3999 operand iRegIsafe() %{ 4000 constraint(ALLOC_IN_RC(long_reg)); 4001 4002 match(iRegI); 4003 4004 format %{ %} 4005 interface(REG_INTER); 4006 %} 4007 4008 // Condition Code Flag Register 4009 operand flagsReg() %{ 4010 constraint(ALLOC_IN_RC(int_flags)); 4011 match(RegFlags); 4012 4013 format %{ "ccr" %} // both ICC and XCC 4014 interface(REG_INTER); 4015 %} 4016 4017 // Condition Code Register, unsigned comparisons. 4018 operand flagsRegU() %{ 4019 constraint(ALLOC_IN_RC(int_flags)); 4020 match(RegFlags); 4021 4022 format %{ "icc_U" %} 4023 interface(REG_INTER); 4024 %} 4025 4026 // Condition Code Register, pointer comparisons. 4027 operand flagsRegP() %{ 4028 constraint(ALLOC_IN_RC(int_flags)); 4029 match(RegFlags); 4030 4031 #ifdef _LP64 4032 format %{ "xcc_P" %} 4033 #else 4034 format %{ "icc_P" %} 4035 #endif 4036 interface(REG_INTER); 4037 %} 4038 4039 // Condition Code Register, long comparisons. 4040 operand flagsRegL() %{ 4041 constraint(ALLOC_IN_RC(int_flags)); 4042 match(RegFlags); 4043 4044 format %{ "xcc_L" %} 4045 interface(REG_INTER); 4046 %} 4047 4048 // Condition Code Register, floating comparisons, unordered same as "less". 4049 operand flagsRegF() %{ 4050 constraint(ALLOC_IN_RC(float_flags)); 4051 match(RegFlags); 4052 match(flagsRegF0); 4053 4054 format %{ %} 4055 interface(REG_INTER); 4056 %} 4057 4058 operand flagsRegF0() %{ 4059 constraint(ALLOC_IN_RC(float_flag0)); 4060 match(RegFlags); 4061 4062 format %{ %} 4063 interface(REG_INTER); 4064 %} 4065 4066 4067 // Condition Code Flag Register used by long compare 4068 operand flagsReg_long_LTGE() %{ 4069 constraint(ALLOC_IN_RC(int_flags)); 4070 match(RegFlags); 4071 format %{ "icc_LTGE" %} 4072 interface(REG_INTER); 4073 %} 4074 operand flagsReg_long_EQNE() %{ 4075 constraint(ALLOC_IN_RC(int_flags)); 4076 match(RegFlags); 4077 format %{ "icc_EQNE" %} 4078 interface(REG_INTER); 4079 %} 4080 operand flagsReg_long_LEGT() %{ 4081 constraint(ALLOC_IN_RC(int_flags)); 4082 match(RegFlags); 4083 format %{ "icc_LEGT" %} 4084 interface(REG_INTER); 4085 %} 4086 4087 4088 operand regD() %{ 4089 constraint(ALLOC_IN_RC(dflt_reg)); 4090 match(RegD); 4091 4092 match(regD_low); 4093 4094 format %{ %} 4095 interface(REG_INTER); 4096 %} 4097 4098 operand regF() %{ 4099 constraint(ALLOC_IN_RC(sflt_reg)); 4100 match(RegF); 4101 4102 format %{ %} 4103 interface(REG_INTER); 4104 %} 4105 4106 operand regD_low() %{ 4107 constraint(ALLOC_IN_RC(dflt_low_reg)); 4108 match(regD); 4109 4110 format %{ %} 4111 interface(REG_INTER); 4112 %} 4113 4114 // Special Registers 4115 4116 // Method Register 4117 operand inline_cache_regP(iRegP reg) %{ 4118 constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1 4119 match(reg); 4120 format %{ %} 4121 interface(REG_INTER); 4122 %} 4123 4124 operand interpreter_method_oop_regP(iRegP reg) %{ 4125 constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1 4126 match(reg); 4127 format %{ %} 4128 interface(REG_INTER); 4129 %} 4130 4131 4132 //----------Complex Operands--------------------------------------------------- 4133 // Indirect Memory Reference 4134 operand indirect(sp_ptr_RegP reg) %{ 4135 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4136 match(reg); 4137 4138 op_cost(100); 4139 format %{ "[$reg]" %} 4140 interface(MEMORY_INTER) %{ 4141 base($reg); 4142 index(0x0); 4143 scale(0x0); 4144 disp(0x0); 4145 %} 4146 %} 4147 4148 // Indirect with simm13 Offset 4149 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{ 4150 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4151 match(AddP reg offset); 4152 4153 op_cost(100); 4154 format %{ "[$reg + $offset]" %} 4155 interface(MEMORY_INTER) %{ 4156 base($reg); 4157 index(0x0); 4158 scale(0x0); 4159 disp($offset); 4160 %} 4161 %} 4162 4163 // Indirect with simm13 Offset minus 7 4164 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{ 4165 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4166 match(AddP reg offset); 4167 4168 op_cost(100); 4169 format %{ "[$reg + $offset]" %} 4170 interface(MEMORY_INTER) %{ 4171 base($reg); 4172 index(0x0); 4173 scale(0x0); 4174 disp($offset); 4175 %} 4176 %} 4177 4178 // Note: Intel has a swapped version also, like this: 4179 //operand indOffsetX(iRegI reg, immP offset) %{ 4180 // constraint(ALLOC_IN_RC(int_reg)); 4181 // match(AddP offset reg); 4182 // 4183 // op_cost(100); 4184 // format %{ "[$reg + $offset]" %} 4185 // interface(MEMORY_INTER) %{ 4186 // base($reg); 4187 // index(0x0); 4188 // scale(0x0); 4189 // disp($offset); 4190 // %} 4191 //%} 4192 //// However, it doesn't make sense for SPARC, since 4193 // we have no particularly good way to embed oops in 4194 // single instructions. 4195 4196 // Indirect with Register Index 4197 operand indIndex(iRegP addr, iRegX index) %{ 4198 constraint(ALLOC_IN_RC(ptr_reg)); 4199 match(AddP addr index); 4200 4201 op_cost(100); 4202 format %{ "[$addr + $index]" %} 4203 interface(MEMORY_INTER) %{ 4204 base($addr); 4205 index($index); 4206 scale(0x0); 4207 disp(0x0); 4208 %} 4209 %} 4210 4211 //----------Special Memory Operands-------------------------------------------- 4212 // Stack Slot Operand - This operand is used for loading and storing temporary 4213 // values on the stack where a match requires a value to 4214 // flow through memory. 4215 operand stackSlotI(sRegI reg) %{ 4216 constraint(ALLOC_IN_RC(stack_slots)); 4217 op_cost(100); 4218 //match(RegI); 4219 format %{ "[$reg]" %} 4220 interface(MEMORY_INTER) %{ 4221 base(0xE); // R_SP 4222 index(0x0); 4223 scale(0x0); 4224 disp($reg); // Stack Offset 4225 %} 4226 %} 4227 4228 operand stackSlotP(sRegP reg) %{ 4229 constraint(ALLOC_IN_RC(stack_slots)); 4230 op_cost(100); 4231 //match(RegP); 4232 format %{ "[$reg]" %} 4233 interface(MEMORY_INTER) %{ 4234 base(0xE); // R_SP 4235 index(0x0); 4236 scale(0x0); 4237 disp($reg); // Stack Offset 4238 %} 4239 %} 4240 4241 operand stackSlotF(sRegF reg) %{ 4242 constraint(ALLOC_IN_RC(stack_slots)); 4243 op_cost(100); 4244 //match(RegF); 4245 format %{ "[$reg]" %} 4246 interface(MEMORY_INTER) %{ 4247 base(0xE); // R_SP 4248 index(0x0); 4249 scale(0x0); 4250 disp($reg); // Stack Offset 4251 %} 4252 %} 4253 operand stackSlotD(sRegD reg) %{ 4254 constraint(ALLOC_IN_RC(stack_slots)); 4255 op_cost(100); 4256 //match(RegD); 4257 format %{ "[$reg]" %} 4258 interface(MEMORY_INTER) %{ 4259 base(0xE); // R_SP 4260 index(0x0); 4261 scale(0x0); 4262 disp($reg); // Stack Offset 4263 %} 4264 %} 4265 operand stackSlotL(sRegL reg) %{ 4266 constraint(ALLOC_IN_RC(stack_slots)); 4267 op_cost(100); 4268 //match(RegL); 4269 format %{ "[$reg]" %} 4270 interface(MEMORY_INTER) %{ 4271 base(0xE); // R_SP 4272 index(0x0); 4273 scale(0x0); 4274 disp($reg); // Stack Offset 4275 %} 4276 %} 4277 4278 // Operands for expressing Control Flow 4279 // NOTE: Label is a predefined operand which should not be redefined in 4280 // the AD file. It is generically handled within the ADLC. 4281 4282 //----------Conditional Branch Operands---------------------------------------- 4283 // Comparison Op - This is the operation of the comparison, and is limited to 4284 // the following set of codes: 4285 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) 4286 // 4287 // Other attributes of the comparison, such as unsignedness, are specified 4288 // by the comparison instruction that sets a condition code flags register. 4289 // That result is represented by a flags operand whose subtype is appropriate 4290 // to the unsignedness (etc.) of the comparison. 4291 // 4292 // Later, the instruction which matches both the Comparison Op (a Bool) and 4293 // the flags (produced by the Cmp) specifies the coding of the comparison op 4294 // by matching a specific subtype of Bool operand below, such as cmpOpU. 4295 4296 operand cmpOp() %{ 4297 match(Bool); 4298 4299 format %{ "" %} 4300 interface(COND_INTER) %{ 4301 equal(0x1); 4302 not_equal(0x9); 4303 less(0x3); 4304 greater_equal(0xB); 4305 less_equal(0x2); 4306 greater(0xA); 4307 %} 4308 %} 4309 4310 // Comparison Op, unsigned 4311 operand cmpOpU() %{ 4312 match(Bool); 4313 4314 format %{ "u" %} 4315 interface(COND_INTER) %{ 4316 equal(0x1); 4317 not_equal(0x9); 4318 less(0x5); 4319 greater_equal(0xD); 4320 less_equal(0x4); 4321 greater(0xC); 4322 %} 4323 %} 4324 4325 // Comparison Op, pointer (same as unsigned) 4326 operand cmpOpP() %{ 4327 match(Bool); 4328 4329 format %{ "p" %} 4330 interface(COND_INTER) %{ 4331 equal(0x1); 4332 not_equal(0x9); 4333 less(0x5); 4334 greater_equal(0xD); 4335 less_equal(0x4); 4336 greater(0xC); 4337 %} 4338 %} 4339 4340 // Comparison Op, branch-register encoding 4341 operand cmpOp_reg() %{ 4342 match(Bool); 4343 4344 format %{ "" %} 4345 interface(COND_INTER) %{ 4346 equal (0x1); 4347 not_equal (0x5); 4348 less (0x3); 4349 greater_equal(0x7); 4350 less_equal (0x2); 4351 greater (0x6); 4352 %} 4353 %} 4354 4355 // Comparison Code, floating, unordered same as less 4356 operand cmpOpF() %{ 4357 match(Bool); 4358 4359 format %{ "fl" %} 4360 interface(COND_INTER) %{ 4361 equal(0x9); 4362 not_equal(0x1); 4363 less(0x3); 4364 greater_equal(0xB); 4365 less_equal(0xE); 4366 greater(0x6); 4367 %} 4368 %} 4369 4370 // Used by long compare 4371 operand cmpOp_commute() %{ 4372 match(Bool); 4373 4374 format %{ "" %} 4375 interface(COND_INTER) %{ 4376 equal(0x1); 4377 not_equal(0x9); 4378 less(0xA); 4379 greater_equal(0x2); 4380 less_equal(0xB); 4381 greater(0x3); 4382 %} 4383 %} 4384 4385 //----------OPERAND CLASSES---------------------------------------------------- 4386 // Operand Classes are groups of operands that are used to simplify 4387 // instruction definitions by not requiring the AD writer to specify separate 4388 // instructions for every form of operand when the instruction accepts 4389 // multiple operand types with the same basic encoding and format. The classic 4390 // case of this is memory operands. 4391 opclass memory( indirect, indOffset13, indIndex ); 4392 opclass indIndexMemory( indIndex ); 4393 4394 //----------PIPELINE----------------------------------------------------------- 4395 pipeline %{ 4396 4397 //----------ATTRIBUTES--------------------------------------------------------- 4398 attributes %{ 4399 fixed_size_instructions; // Fixed size instructions 4400 branch_has_delay_slot; // Branch has delay slot following 4401 max_instructions_per_bundle = 4; // Up to 4 instructions per bundle 4402 instruction_unit_size = 4; // An instruction is 4 bytes long 4403 instruction_fetch_unit_size = 16; // The processor fetches one line 4404 instruction_fetch_units = 1; // of 16 bytes 4405 4406 // List of nop instructions 4407 nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR ); 4408 %} 4409 4410 //----------RESOURCES---------------------------------------------------------- 4411 // Resources are the functional units available to the machine 4412 resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1); 4413 4414 //----------PIPELINE DESCRIPTION----------------------------------------------- 4415 // Pipeline Description specifies the stages in the machine's pipeline 4416 4417 pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D); 4418 4419 //----------PIPELINE CLASSES--------------------------------------------------- 4420 // Pipeline Classes describe the stages in which input and output are 4421 // referenced by the hardware pipeline. 4422 4423 // Integer ALU reg-reg operation 4424 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 4425 single_instruction; 4426 dst : E(write); 4427 src1 : R(read); 4428 src2 : R(read); 4429 IALU : R; 4430 %} 4431 4432 // Integer ALU reg-reg long operation 4433 pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 4434 instruction_count(2); 4435 dst : E(write); 4436 src1 : R(read); 4437 src2 : R(read); 4438 IALU : R; 4439 IALU : R; 4440 %} 4441 4442 // Integer ALU reg-reg long dependent operation 4443 pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{ 4444 instruction_count(1); multiple_bundles; 4445 dst : E(write); 4446 src1 : R(read); 4447 src2 : R(read); 4448 cr : E(write); 4449 IALU : R(2); 4450 %} 4451 4452 // Integer ALU reg-imm operaion 4453 pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{ 4454 single_instruction; 4455 dst : E(write); 4456 src1 : R(read); 4457 IALU : R; 4458 %} 4459 4460 // Integer ALU reg-reg operation with condition code 4461 pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{ 4462 single_instruction; 4463 dst : E(write); 4464 cr : E(write); 4465 src1 : R(read); 4466 src2 : R(read); 4467 IALU : R; 4468 %} 4469 4470 // Integer ALU reg-imm operation with condition code 4471 pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{ 4472 single_instruction; 4473 dst : E(write); 4474 cr : E(write); 4475 src1 : R(read); 4476 IALU : R; 4477 %} 4478 4479 // Integer ALU zero-reg operation 4480 pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 4481 single_instruction; 4482 dst : E(write); 4483 src2 : R(read); 4484 IALU : R; 4485 %} 4486 4487 // Integer ALU zero-reg operation with condition code only 4488 pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{ 4489 single_instruction; 4490 cr : E(write); 4491 src : R(read); 4492 IALU : R; 4493 %} 4494 4495 // Integer ALU reg-reg operation with condition code only 4496 pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{ 4497 single_instruction; 4498 cr : E(write); 4499 src1 : R(read); 4500 src2 : R(read); 4501 IALU : R; 4502 %} 4503 4504 // Integer ALU reg-imm operation with condition code only 4505 pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{ 4506 single_instruction; 4507 cr : E(write); 4508 src1 : R(read); 4509 IALU : R; 4510 %} 4511 4512 // Integer ALU reg-reg-zero operation with condition code only 4513 pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{ 4514 single_instruction; 4515 cr : E(write); 4516 src1 : R(read); 4517 src2 : R(read); 4518 IALU : R; 4519 %} 4520 4521 // Integer ALU reg-imm-zero operation with condition code only 4522 pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{ 4523 single_instruction; 4524 cr : E(write); 4525 src1 : R(read); 4526 IALU : R; 4527 %} 4528 4529 // Integer ALU reg-reg operation with condition code, src1 modified 4530 pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{ 4531 single_instruction; 4532 cr : E(write); 4533 src1 : E(write); 4534 src1 : R(read); 4535 src2 : R(read); 4536 IALU : R; 4537 %} 4538 4539 // Integer ALU reg-imm operation with condition code, src1 modified 4540 pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{ 4541 single_instruction; 4542 cr : E(write); 4543 src1 : E(write); 4544 src1 : R(read); 4545 IALU : R; 4546 %} 4547 4548 pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{ 4549 multiple_bundles; 4550 dst : E(write)+4; 4551 cr : E(write); 4552 src1 : R(read); 4553 src2 : R(read); 4554 IALU : R(3); 4555 BR : R(2); 4556 %} 4557 4558 // Integer ALU operation 4559 pipe_class ialu_none(iRegI dst) %{ 4560 single_instruction; 4561 dst : E(write); 4562 IALU : R; 4563 %} 4564 4565 // Integer ALU reg operation 4566 pipe_class ialu_reg(iRegI dst, iRegI src) %{ 4567 single_instruction; may_have_no_code; 4568 dst : E(write); 4569 src : R(read); 4570 IALU : R; 4571 %} 4572 4573 // Integer ALU reg conditional operation 4574 // This instruction has a 1 cycle stall, and cannot execute 4575 // in the same cycle as the instruction setting the condition 4576 // code. We kludge this by pretending to read the condition code 4577 // 1 cycle earlier, and by marking the functional units as busy 4578 // for 2 cycles with the result available 1 cycle later than 4579 // is really the case. 4580 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{ 4581 single_instruction; 4582 op2_out : C(write); 4583 op1 : R(read); 4584 cr : R(read); // This is really E, with a 1 cycle stall 4585 BR : R(2); 4586 MS : R(2); 4587 %} 4588 4589 #ifdef _LP64 4590 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{ 4591 instruction_count(1); multiple_bundles; 4592 dst : C(write)+1; 4593 src : R(read)+1; 4594 IALU : R(1); 4595 BR : E(2); 4596 MS : E(2); 4597 %} 4598 #endif 4599 4600 // Integer ALU reg operation 4601 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{ 4602 single_instruction; may_have_no_code; 4603 dst : E(write); 4604 src : R(read); 4605 IALU : R; 4606 %} 4607 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{ 4608 single_instruction; may_have_no_code; 4609 dst : E(write); 4610 src : R(read); 4611 IALU : R; 4612 %} 4613 4614 // Two integer ALU reg operations 4615 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{ 4616 instruction_count(2); 4617 dst : E(write); 4618 src : R(read); 4619 A0 : R; 4620 A1 : R; 4621 %} 4622 4623 // Two integer ALU reg operations 4624 pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{ 4625 instruction_count(2); may_have_no_code; 4626 dst : E(write); 4627 src : R(read); 4628 A0 : R; 4629 A1 : R; 4630 %} 4631 4632 // Integer ALU imm operation 4633 pipe_class ialu_imm(iRegI dst, immI13 src) %{ 4634 single_instruction; 4635 dst : E(write); 4636 IALU : R; 4637 %} 4638 4639 // Integer ALU reg-reg with carry operation 4640 pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{ 4641 single_instruction; 4642 dst : E(write); 4643 src1 : R(read); 4644 src2 : R(read); 4645 IALU : R; 4646 %} 4647 4648 // Integer ALU cc operation 4649 pipe_class ialu_cc(iRegI dst, flagsReg cc) %{ 4650 single_instruction; 4651 dst : E(write); 4652 cc : R(read); 4653 IALU : R; 4654 %} 4655 4656 // Integer ALU cc / second IALU operation 4657 pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{ 4658 instruction_count(1); multiple_bundles; 4659 dst : E(write)+1; 4660 src : R(read); 4661 IALU : R; 4662 %} 4663 4664 // Integer ALU cc / second IALU operation 4665 pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{ 4666 instruction_count(1); multiple_bundles; 4667 dst : E(write)+1; 4668 p : R(read); 4669 q : R(read); 4670 IALU : R; 4671 %} 4672 4673 // Integer ALU hi-lo-reg operation 4674 pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{ 4675 instruction_count(1); multiple_bundles; 4676 dst : E(write)+1; 4677 IALU : R(2); 4678 %} 4679 4680 // Float ALU hi-lo-reg operation (with temp) 4681 pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{ 4682 instruction_count(1); multiple_bundles; 4683 dst : E(write)+1; 4684 IALU : R(2); 4685 %} 4686 4687 // Long Constant 4688 pipe_class loadConL( iRegL dst, immL src ) %{ 4689 instruction_count(2); multiple_bundles; 4690 dst : E(write)+1; 4691 IALU : R(2); 4692 IALU : R(2); 4693 %} 4694 4695 // Pointer Constant 4696 pipe_class loadConP( iRegP dst, immP src ) %{ 4697 instruction_count(0); multiple_bundles; 4698 fixed_latency(6); 4699 %} 4700 4701 // Polling Address 4702 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{ 4703 #ifdef _LP64 4704 instruction_count(0); multiple_bundles; 4705 fixed_latency(6); 4706 #else 4707 dst : E(write); 4708 IALU : R; 4709 #endif 4710 %} 4711 4712 // Long Constant small 4713 pipe_class loadConLlo( iRegL dst, immL src ) %{ 4714 instruction_count(2); 4715 dst : E(write); 4716 IALU : R; 4717 IALU : R; 4718 %} 4719 4720 // [PHH] This is wrong for 64-bit. See LdImmF/D. 4721 pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{ 4722 instruction_count(1); multiple_bundles; 4723 src : R(read); 4724 dst : M(write)+1; 4725 IALU : R; 4726 MS : E; 4727 %} 4728 4729 // Integer ALU nop operation 4730 pipe_class ialu_nop() %{ 4731 single_instruction; 4732 IALU : R; 4733 %} 4734 4735 // Integer ALU nop operation 4736 pipe_class ialu_nop_A0() %{ 4737 single_instruction; 4738 A0 : R; 4739 %} 4740 4741 // Integer ALU nop operation 4742 pipe_class ialu_nop_A1() %{ 4743 single_instruction; 4744 A1 : R; 4745 %} 4746 4747 // Integer Multiply reg-reg operation 4748 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 4749 single_instruction; 4750 dst : E(write); 4751 src1 : R(read); 4752 src2 : R(read); 4753 MS : R(5); 4754 %} 4755 4756 // Integer Multiply reg-imm operation 4757 pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{ 4758 single_instruction; 4759 dst : E(write); 4760 src1 : R(read); 4761 MS : R(5); 4762 %} 4763 4764 pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 4765 single_instruction; 4766 dst : E(write)+4; 4767 src1 : R(read); 4768 src2 : R(read); 4769 MS : R(6); 4770 %} 4771 4772 pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{ 4773 single_instruction; 4774 dst : E(write)+4; 4775 src1 : R(read); 4776 MS : R(6); 4777 %} 4778 4779 // Integer Divide reg-reg 4780 pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{ 4781 instruction_count(1); multiple_bundles; 4782 dst : E(write); 4783 temp : E(write); 4784 src1 : R(read); 4785 src2 : R(read); 4786 temp : R(read); 4787 MS : R(38); 4788 %} 4789 4790 // Integer Divide reg-imm 4791 pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{ 4792 instruction_count(1); multiple_bundles; 4793 dst : E(write); 4794 temp : E(write); 4795 src1 : R(read); 4796 temp : R(read); 4797 MS : R(38); 4798 %} 4799 4800 // Long Divide 4801 pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 4802 dst : E(write)+71; 4803 src1 : R(read); 4804 src2 : R(read)+1; 4805 MS : R(70); 4806 %} 4807 4808 pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{ 4809 dst : E(write)+71; 4810 src1 : R(read); 4811 MS : R(70); 4812 %} 4813 4814 // Floating Point Add Float 4815 pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{ 4816 single_instruction; 4817 dst : X(write); 4818 src1 : E(read); 4819 src2 : E(read); 4820 FA : R; 4821 %} 4822 4823 // Floating Point Add Double 4824 pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{ 4825 single_instruction; 4826 dst : X(write); 4827 src1 : E(read); 4828 src2 : E(read); 4829 FA : R; 4830 %} 4831 4832 // Floating Point Conditional Move based on integer flags 4833 pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{ 4834 single_instruction; 4835 dst : X(write); 4836 src : E(read); 4837 cr : R(read); 4838 FA : R(2); 4839 BR : R(2); 4840 %} 4841 4842 // Floating Point Conditional Move based on integer flags 4843 pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{ 4844 single_instruction; 4845 dst : X(write); 4846 src : E(read); 4847 cr : R(read); 4848 FA : R(2); 4849 BR : R(2); 4850 %} 4851 4852 // Floating Point Multiply Float 4853 pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{ 4854 single_instruction; 4855 dst : X(write); 4856 src1 : E(read); 4857 src2 : E(read); 4858 FM : R; 4859 %} 4860 4861 // Floating Point Multiply Double 4862 pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{ 4863 single_instruction; 4864 dst : X(write); 4865 src1 : E(read); 4866 src2 : E(read); 4867 FM : R; 4868 %} 4869 4870 // Floating Point Divide Float 4871 pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{ 4872 single_instruction; 4873 dst : X(write); 4874 src1 : E(read); 4875 src2 : E(read); 4876 FM : R; 4877 FDIV : C(14); 4878 %} 4879 4880 // Floating Point Divide Double 4881 pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{ 4882 single_instruction; 4883 dst : X(write); 4884 src1 : E(read); 4885 src2 : E(read); 4886 FM : R; 4887 FDIV : C(17); 4888 %} 4889 4890 // Floating Point Move/Negate/Abs Float 4891 pipe_class faddF_reg(regF dst, regF src) %{ 4892 single_instruction; 4893 dst : W(write); 4894 src : E(read); 4895 FA : R(1); 4896 %} 4897 4898 // Floating Point Move/Negate/Abs Double 4899 pipe_class faddD_reg(regD dst, regD src) %{ 4900 single_instruction; 4901 dst : W(write); 4902 src : E(read); 4903 FA : R; 4904 %} 4905 4906 // Floating Point Convert F->D 4907 pipe_class fcvtF2D(regD dst, regF src) %{ 4908 single_instruction; 4909 dst : X(write); 4910 src : E(read); 4911 FA : R; 4912 %} 4913 4914 // Floating Point Convert I->D 4915 pipe_class fcvtI2D(regD dst, regF src) %{ 4916 single_instruction; 4917 dst : X(write); 4918 src : E(read); 4919 FA : R; 4920 %} 4921 4922 // Floating Point Convert LHi->D 4923 pipe_class fcvtLHi2D(regD dst, regD src) %{ 4924 single_instruction; 4925 dst : X(write); 4926 src : E(read); 4927 FA : R; 4928 %} 4929 4930 // Floating Point Convert L->D 4931 pipe_class fcvtL2D(regD dst, regF src) %{ 4932 single_instruction; 4933 dst : X(write); 4934 src : E(read); 4935 FA : R; 4936 %} 4937 4938 // Floating Point Convert L->F 4939 pipe_class fcvtL2F(regD dst, regF src) %{ 4940 single_instruction; 4941 dst : X(write); 4942 src : E(read); 4943 FA : R; 4944 %} 4945 4946 // Floating Point Convert D->F 4947 pipe_class fcvtD2F(regD dst, regF src) %{ 4948 single_instruction; 4949 dst : X(write); 4950 src : E(read); 4951 FA : R; 4952 %} 4953 4954 // Floating Point Convert I->L 4955 pipe_class fcvtI2L(regD dst, regF src) %{ 4956 single_instruction; 4957 dst : X(write); 4958 src : E(read); 4959 FA : R; 4960 %} 4961 4962 // Floating Point Convert D->F 4963 pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{ 4964 instruction_count(1); multiple_bundles; 4965 dst : X(write)+6; 4966 src : E(read); 4967 FA : R; 4968 %} 4969 4970 // Floating Point Convert D->L 4971 pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{ 4972 instruction_count(1); multiple_bundles; 4973 dst : X(write)+6; 4974 src : E(read); 4975 FA : R; 4976 %} 4977 4978 // Floating Point Convert F->I 4979 pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{ 4980 instruction_count(1); multiple_bundles; 4981 dst : X(write)+6; 4982 src : E(read); 4983 FA : R; 4984 %} 4985 4986 // Floating Point Convert F->L 4987 pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{ 4988 instruction_count(1); multiple_bundles; 4989 dst : X(write)+6; 4990 src : E(read); 4991 FA : R; 4992 %} 4993 4994 // Floating Point Convert I->F 4995 pipe_class fcvtI2F(regF dst, regF src) %{ 4996 single_instruction; 4997 dst : X(write); 4998 src : E(read); 4999 FA : R; 5000 %} 5001 5002 // Floating Point Compare 5003 pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{ 5004 single_instruction; 5005 cr : X(write); 5006 src1 : E(read); 5007 src2 : E(read); 5008 FA : R; 5009 %} 5010 5011 // Floating Point Compare 5012 pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{ 5013 single_instruction; 5014 cr : X(write); 5015 src1 : E(read); 5016 src2 : E(read); 5017 FA : R; 5018 %} 5019 5020 // Floating Add Nop 5021 pipe_class fadd_nop() %{ 5022 single_instruction; 5023 FA : R; 5024 %} 5025 5026 // Integer Store to Memory 5027 pipe_class istore_mem_reg(memory mem, iRegI src) %{ 5028 single_instruction; 5029 mem : R(read); 5030 src : C(read); 5031 MS : R; 5032 %} 5033 5034 // Integer Store to Memory 5035 pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{ 5036 single_instruction; 5037 mem : R(read); 5038 src : C(read); 5039 MS : R; 5040 %} 5041 5042 // Integer Store Zero to Memory 5043 pipe_class istore_mem_zero(memory mem, immI0 src) %{ 5044 single_instruction; 5045 mem : R(read); 5046 MS : R; 5047 %} 5048 5049 // Special Stack Slot Store 5050 pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{ 5051 single_instruction; 5052 stkSlot : R(read); 5053 src : C(read); 5054 MS : R; 5055 %} 5056 5057 // Special Stack Slot Store 5058 pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{ 5059 instruction_count(2); multiple_bundles; 5060 stkSlot : R(read); 5061 src : C(read); 5062 MS : R(2); 5063 %} 5064 5065 // Float Store 5066 pipe_class fstoreF_mem_reg(memory mem, RegF src) %{ 5067 single_instruction; 5068 mem : R(read); 5069 src : C(read); 5070 MS : R; 5071 %} 5072 5073 // Float Store 5074 pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{ 5075 single_instruction; 5076 mem : R(read); 5077 MS : R; 5078 %} 5079 5080 // Double Store 5081 pipe_class fstoreD_mem_reg(memory mem, RegD src) %{ 5082 instruction_count(1); 5083 mem : R(read); 5084 src : C(read); 5085 MS : R; 5086 %} 5087 5088 // Double Store 5089 pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{ 5090 single_instruction; 5091 mem : R(read); 5092 MS : R; 5093 %} 5094 5095 // Special Stack Slot Float Store 5096 pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{ 5097 single_instruction; 5098 stkSlot : R(read); 5099 src : C(read); 5100 MS : R; 5101 %} 5102 5103 // Special Stack Slot Double Store 5104 pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{ 5105 single_instruction; 5106 stkSlot : R(read); 5107 src : C(read); 5108 MS : R; 5109 %} 5110 5111 // Integer Load (when sign bit propagation not needed) 5112 pipe_class iload_mem(iRegI dst, memory mem) %{ 5113 single_instruction; 5114 mem : R(read); 5115 dst : C(write); 5116 MS : R; 5117 %} 5118 5119 // Integer Load from stack operand 5120 pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{ 5121 single_instruction; 5122 mem : R(read); 5123 dst : C(write); 5124 MS : R; 5125 %} 5126 5127 // Integer Load (when sign bit propagation or masking is needed) 5128 pipe_class iload_mask_mem(iRegI dst, memory mem) %{ 5129 single_instruction; 5130 mem : R(read); 5131 dst : M(write); 5132 MS : R; 5133 %} 5134 5135 // Float Load 5136 pipe_class floadF_mem(regF dst, memory mem) %{ 5137 single_instruction; 5138 mem : R(read); 5139 dst : M(write); 5140 MS : R; 5141 %} 5142 5143 // Float Load 5144 pipe_class floadD_mem(regD dst, memory mem) %{ 5145 instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case 5146 mem : R(read); 5147 dst : M(write); 5148 MS : R; 5149 %} 5150 5151 // Float Load 5152 pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{ 5153 single_instruction; 5154 stkSlot : R(read); 5155 dst : M(write); 5156 MS : R; 5157 %} 5158 5159 // Float Load 5160 pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{ 5161 single_instruction; 5162 stkSlot : R(read); 5163 dst : M(write); 5164 MS : R; 5165 %} 5166 5167 // Memory Nop 5168 pipe_class mem_nop() %{ 5169 single_instruction; 5170 MS : R; 5171 %} 5172 5173 pipe_class sethi(iRegP dst, immI src) %{ 5174 single_instruction; 5175 dst : E(write); 5176 IALU : R; 5177 %} 5178 5179 pipe_class loadPollP(iRegP poll) %{ 5180 single_instruction; 5181 poll : R(read); 5182 MS : R; 5183 %} 5184 5185 pipe_class br(Universe br, label labl) %{ 5186 single_instruction_with_delay_slot; 5187 BR : R; 5188 %} 5189 5190 pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{ 5191 single_instruction_with_delay_slot; 5192 cr : E(read); 5193 BR : R; 5194 %} 5195 5196 pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{ 5197 single_instruction_with_delay_slot; 5198 op1 : E(read); 5199 BR : R; 5200 MS : R; 5201 %} 5202 5203 // Compare and branch 5204 pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{ 5205 instruction_count(2); has_delay_slot; 5206 cr : E(write); 5207 src1 : R(read); 5208 src2 : R(read); 5209 IALU : R; 5210 BR : R; 5211 %} 5212 5213 // Compare and branch 5214 pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{ 5215 instruction_count(2); has_delay_slot; 5216 cr : E(write); 5217 src1 : R(read); 5218 IALU : R; 5219 BR : R; 5220 %} 5221 5222 // Compare and branch using cbcond 5223 pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{ 5224 single_instruction; 5225 src1 : E(read); 5226 src2 : E(read); 5227 IALU : R; 5228 BR : R; 5229 %} 5230 5231 // Compare and branch using cbcond 5232 pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{ 5233 single_instruction; 5234 src1 : E(read); 5235 IALU : R; 5236 BR : R; 5237 %} 5238 5239 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{ 5240 single_instruction_with_delay_slot; 5241 cr : E(read); 5242 BR : R; 5243 %} 5244 5245 pipe_class br_nop() %{ 5246 single_instruction; 5247 BR : R; 5248 %} 5249 5250 pipe_class simple_call(method meth) %{ 5251 instruction_count(2); multiple_bundles; force_serialization; 5252 fixed_latency(100); 5253 BR : R(1); 5254 MS : R(1); 5255 A0 : R(1); 5256 %} 5257 5258 pipe_class compiled_call(method meth) %{ 5259 instruction_count(1); multiple_bundles; force_serialization; 5260 fixed_latency(100); 5261 MS : R(1); 5262 %} 5263 5264 pipe_class call(method meth) %{ 5265 instruction_count(0); multiple_bundles; force_serialization; 5266 fixed_latency(100); 5267 %} 5268 5269 pipe_class tail_call(Universe ignore, label labl) %{ 5270 single_instruction; has_delay_slot; 5271 fixed_latency(100); 5272 BR : R(1); 5273 MS : R(1); 5274 %} 5275 5276 pipe_class ret(Universe ignore) %{ 5277 single_instruction; has_delay_slot; 5278 BR : R(1); 5279 MS : R(1); 5280 %} 5281 5282 pipe_class ret_poll(g3RegP poll) %{ 5283 instruction_count(3); has_delay_slot; 5284 poll : E(read); 5285 MS : R; 5286 %} 5287 5288 // The real do-nothing guy 5289 pipe_class empty( ) %{ 5290 instruction_count(0); 5291 %} 5292 5293 pipe_class long_memory_op() %{ 5294 instruction_count(0); multiple_bundles; force_serialization; 5295 fixed_latency(25); 5296 MS : R(1); 5297 %} 5298 5299 // Check-cast 5300 pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{ 5301 array : R(read); 5302 match : R(read); 5303 IALU : R(2); 5304 BR : R(2); 5305 MS : R; 5306 %} 5307 5308 // Convert FPU flags into +1,0,-1 5309 pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{ 5310 src1 : E(read); 5311 src2 : E(read); 5312 dst : E(write); 5313 FA : R; 5314 MS : R(2); 5315 BR : R(2); 5316 %} 5317 5318 // Compare for p < q, and conditionally add y 5319 pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{ 5320 p : E(read); 5321 q : E(read); 5322 y : E(read); 5323 IALU : R(3) 5324 %} 5325 5326 // Perform a compare, then move conditionally in a branch delay slot. 5327 pipe_class min_max( iRegI src2, iRegI srcdst ) %{ 5328 src2 : E(read); 5329 srcdst : E(read); 5330 IALU : R; 5331 BR : R; 5332 %} 5333 5334 // Define the class for the Nop node 5335 define %{ 5336 MachNop = ialu_nop; 5337 %} 5338 5339 %} 5340 5341 //----------INSTRUCTIONS------------------------------------------------------- 5342 5343 //------------Special Stack Slot instructions - no match rules----------------- 5344 instruct stkI_to_regF(regF dst, stackSlotI src) %{ 5345 // No match rule to avoid chain rule match. 5346 effect(DEF dst, USE src); 5347 ins_cost(MEMORY_REF_COST); 5348 size(4); 5349 format %{ "LDF $src,$dst\t! stkI to regF" %} 5350 opcode(Assembler::ldf_op3); 5351 ins_encode(simple_form3_mem_reg(src, dst)); 5352 ins_pipe(floadF_stk); 5353 %} 5354 5355 instruct stkL_to_regD(regD dst, stackSlotL src) %{ 5356 // No match rule to avoid chain rule match. 5357 effect(DEF dst, USE src); 5358 ins_cost(MEMORY_REF_COST); 5359 size(4); 5360 format %{ "LDDF $src,$dst\t! stkL to regD" %} 5361 opcode(Assembler::lddf_op3); 5362 ins_encode(simple_form3_mem_reg(src, dst)); 5363 ins_pipe(floadD_stk); 5364 %} 5365 5366 instruct regF_to_stkI(stackSlotI dst, regF src) %{ 5367 // No match rule to avoid chain rule match. 5368 effect(DEF dst, USE src); 5369 ins_cost(MEMORY_REF_COST); 5370 size(4); 5371 format %{ "STF $src,$dst\t! regF to stkI" %} 5372 opcode(Assembler::stf_op3); 5373 ins_encode(simple_form3_mem_reg(dst, src)); 5374 ins_pipe(fstoreF_stk_reg); 5375 %} 5376 5377 instruct regD_to_stkL(stackSlotL dst, regD src) %{ 5378 // No match rule to avoid chain rule match. 5379 effect(DEF dst, USE src); 5380 ins_cost(MEMORY_REF_COST); 5381 size(4); 5382 format %{ "STDF $src,$dst\t! regD to stkL" %} 5383 opcode(Assembler::stdf_op3); 5384 ins_encode(simple_form3_mem_reg(dst, src)); 5385 ins_pipe(fstoreD_stk_reg); 5386 %} 5387 5388 instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{ 5389 effect(DEF dst, USE src); 5390 ins_cost(MEMORY_REF_COST*2); 5391 size(8); 5392 format %{ "STW $src,$dst.hi\t! long\n\t" 5393 "STW R_G0,$dst.lo" %} 5394 opcode(Assembler::stw_op3); 5395 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0)); 5396 ins_pipe(lstoreI_stk_reg); 5397 %} 5398 5399 instruct regL_to_stkD(stackSlotD dst, iRegL src) %{ 5400 // No match rule to avoid chain rule match. 5401 effect(DEF dst, USE src); 5402 ins_cost(MEMORY_REF_COST); 5403 size(4); 5404 format %{ "STX $src,$dst\t! regL to stkD" %} 5405 opcode(Assembler::stx_op3); 5406 ins_encode(simple_form3_mem_reg( dst, src ) ); 5407 ins_pipe(istore_stk_reg); 5408 %} 5409 5410 //---------- Chain stack slots between similar types -------- 5411 5412 // Load integer from stack slot 5413 instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{ 5414 match(Set dst src); 5415 ins_cost(MEMORY_REF_COST); 5416 5417 size(4); 5418 format %{ "LDUW $src,$dst\t!stk" %} 5419 opcode(Assembler::lduw_op3); 5420 ins_encode(simple_form3_mem_reg( src, dst ) ); 5421 ins_pipe(iload_mem); 5422 %} 5423 5424 // Store integer to stack slot 5425 instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{ 5426 match(Set dst src); 5427 ins_cost(MEMORY_REF_COST); 5428 5429 size(4); 5430 format %{ "STW $src,$dst\t!stk" %} 5431 opcode(Assembler::stw_op3); 5432 ins_encode(simple_form3_mem_reg( dst, src ) ); 5433 ins_pipe(istore_mem_reg); 5434 %} 5435 5436 // Load long from stack slot 5437 instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{ 5438 match(Set dst src); 5439 5440 ins_cost(MEMORY_REF_COST); 5441 size(4); 5442 format %{ "LDX $src,$dst\t! long" %} 5443 opcode(Assembler::ldx_op3); 5444 ins_encode(simple_form3_mem_reg( src, dst ) ); 5445 ins_pipe(iload_mem); 5446 %} 5447 5448 // Store long to stack slot 5449 instruct regL_to_stkL(stackSlotL dst, iRegL src) %{ 5450 match(Set dst src); 5451 5452 ins_cost(MEMORY_REF_COST); 5453 size(4); 5454 format %{ "STX $src,$dst\t! long" %} 5455 opcode(Assembler::stx_op3); 5456 ins_encode(simple_form3_mem_reg( dst, src ) ); 5457 ins_pipe(istore_mem_reg); 5458 %} 5459 5460 #ifdef _LP64 5461 // Load pointer from stack slot, 64-bit encoding 5462 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{ 5463 match(Set dst src); 5464 ins_cost(MEMORY_REF_COST); 5465 size(4); 5466 format %{ "LDX $src,$dst\t!ptr" %} 5467 opcode(Assembler::ldx_op3); 5468 ins_encode(simple_form3_mem_reg( src, dst ) ); 5469 ins_pipe(iload_mem); 5470 %} 5471 5472 // Store pointer to stack slot 5473 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{ 5474 match(Set dst src); 5475 ins_cost(MEMORY_REF_COST); 5476 size(4); 5477 format %{ "STX $src,$dst\t!ptr" %} 5478 opcode(Assembler::stx_op3); 5479 ins_encode(simple_form3_mem_reg( dst, src ) ); 5480 ins_pipe(istore_mem_reg); 5481 %} 5482 #else // _LP64 5483 // Load pointer from stack slot, 32-bit encoding 5484 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{ 5485 match(Set dst src); 5486 ins_cost(MEMORY_REF_COST); 5487 format %{ "LDUW $src,$dst\t!ptr" %} 5488 opcode(Assembler::lduw_op3, Assembler::ldst_op); 5489 ins_encode(simple_form3_mem_reg( src, dst ) ); 5490 ins_pipe(iload_mem); 5491 %} 5492 5493 // Store pointer to stack slot 5494 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{ 5495 match(Set dst src); 5496 ins_cost(MEMORY_REF_COST); 5497 format %{ "STW $src,$dst\t!ptr" %} 5498 opcode(Assembler::stw_op3, Assembler::ldst_op); 5499 ins_encode(simple_form3_mem_reg( dst, src ) ); 5500 ins_pipe(istore_mem_reg); 5501 %} 5502 #endif // _LP64 5503 5504 //------------Special Nop instructions for bundling - no match rules----------- 5505 // Nop using the A0 functional unit 5506 instruct Nop_A0() %{ 5507 ins_cost(0); 5508 5509 format %{ "NOP ! Alu Pipeline" %} 5510 opcode(Assembler::or_op3, Assembler::arith_op); 5511 ins_encode( form2_nop() ); 5512 ins_pipe(ialu_nop_A0); 5513 %} 5514 5515 // Nop using the A1 functional unit 5516 instruct Nop_A1( ) %{ 5517 ins_cost(0); 5518 5519 format %{ "NOP ! Alu Pipeline" %} 5520 opcode(Assembler::or_op3, Assembler::arith_op); 5521 ins_encode( form2_nop() ); 5522 ins_pipe(ialu_nop_A1); 5523 %} 5524 5525 // Nop using the memory functional unit 5526 instruct Nop_MS( ) %{ 5527 ins_cost(0); 5528 5529 format %{ "NOP ! Memory Pipeline" %} 5530 ins_encode( emit_mem_nop ); 5531 ins_pipe(mem_nop); 5532 %} 5533 5534 // Nop using the floating add functional unit 5535 instruct Nop_FA( ) %{ 5536 ins_cost(0); 5537 5538 format %{ "NOP ! Floating Add Pipeline" %} 5539 ins_encode( emit_fadd_nop ); 5540 ins_pipe(fadd_nop); 5541 %} 5542 5543 // Nop using the branch functional unit 5544 instruct Nop_BR( ) %{ 5545 ins_cost(0); 5546 5547 format %{ "NOP ! Branch Pipeline" %} 5548 ins_encode( emit_br_nop ); 5549 ins_pipe(br_nop); 5550 %} 5551 5552 //----------Load/Store/Move Instructions--------------------------------------- 5553 //----------Load Instructions-------------------------------------------------- 5554 // Load Byte (8bit signed) 5555 instruct loadB(iRegI dst, memory mem) %{ 5556 match(Set dst (LoadB mem)); 5557 ins_cost(MEMORY_REF_COST); 5558 5559 size(4); 5560 format %{ "LDSB $mem,$dst\t! byte" %} 5561 ins_encode %{ 5562 __ ldsb($mem$$Address, $dst$$Register); 5563 %} 5564 ins_pipe(iload_mask_mem); 5565 %} 5566 5567 // Load Byte (8bit signed) into a Long Register 5568 instruct loadB2L(iRegL dst, memory mem) %{ 5569 match(Set dst (ConvI2L (LoadB mem))); 5570 ins_cost(MEMORY_REF_COST); 5571 5572 size(4); 5573 format %{ "LDSB $mem,$dst\t! byte -> long" %} 5574 ins_encode %{ 5575 __ ldsb($mem$$Address, $dst$$Register); 5576 %} 5577 ins_pipe(iload_mask_mem); 5578 %} 5579 5580 // Load Unsigned Byte (8bit UNsigned) into an int reg 5581 instruct loadUB(iRegI dst, memory mem) %{ 5582 match(Set dst (LoadUB mem)); 5583 ins_cost(MEMORY_REF_COST); 5584 5585 size(4); 5586 format %{ "LDUB $mem,$dst\t! ubyte" %} 5587 ins_encode %{ 5588 __ ldub($mem$$Address, $dst$$Register); 5589 %} 5590 ins_pipe(iload_mem); 5591 %} 5592 5593 // Load Unsigned Byte (8bit UNsigned) into a Long Register 5594 instruct loadUB2L(iRegL dst, memory mem) %{ 5595 match(Set dst (ConvI2L (LoadUB mem))); 5596 ins_cost(MEMORY_REF_COST); 5597 5598 size(4); 5599 format %{ "LDUB $mem,$dst\t! ubyte -> long" %} 5600 ins_encode %{ 5601 __ ldub($mem$$Address, $dst$$Register); 5602 %} 5603 ins_pipe(iload_mem); 5604 %} 5605 5606 // Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register 5607 instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{ 5608 match(Set dst (ConvI2L (AndI (LoadUB mem) mask))); 5609 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5610 5611 size(2*4); 5612 format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t" 5613 "AND $dst,$mask,$dst" %} 5614 ins_encode %{ 5615 __ ldub($mem$$Address, $dst$$Register); 5616 __ and3($dst$$Register, $mask$$constant, $dst$$Register); 5617 %} 5618 ins_pipe(iload_mem); 5619 %} 5620 5621 // Load Short (16bit signed) 5622 instruct loadS(iRegI dst, memory mem) %{ 5623 match(Set dst (LoadS mem)); 5624 ins_cost(MEMORY_REF_COST); 5625 5626 size(4); 5627 format %{ "LDSH $mem,$dst\t! short" %} 5628 ins_encode %{ 5629 __ ldsh($mem$$Address, $dst$$Register); 5630 %} 5631 ins_pipe(iload_mask_mem); 5632 %} 5633 5634 // Load Short (16 bit signed) to Byte (8 bit signed) 5635 instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5636 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour)); 5637 ins_cost(MEMORY_REF_COST); 5638 5639 size(4); 5640 5641 format %{ "LDSB $mem+1,$dst\t! short -> byte" %} 5642 ins_encode %{ 5643 __ ldsb($mem$$Address, $dst$$Register, 1); 5644 %} 5645 ins_pipe(iload_mask_mem); 5646 %} 5647 5648 // Load Short (16bit signed) into a Long Register 5649 instruct loadS2L(iRegL dst, memory mem) %{ 5650 match(Set dst (ConvI2L (LoadS mem))); 5651 ins_cost(MEMORY_REF_COST); 5652 5653 size(4); 5654 format %{ "LDSH $mem,$dst\t! short -> long" %} 5655 ins_encode %{ 5656 __ ldsh($mem$$Address, $dst$$Register); 5657 %} 5658 ins_pipe(iload_mask_mem); 5659 %} 5660 5661 // Load Unsigned Short/Char (16bit UNsigned) 5662 instruct loadUS(iRegI dst, memory mem) %{ 5663 match(Set dst (LoadUS mem)); 5664 ins_cost(MEMORY_REF_COST); 5665 5666 size(4); 5667 format %{ "LDUH $mem,$dst\t! ushort/char" %} 5668 ins_encode %{ 5669 __ lduh($mem$$Address, $dst$$Register); 5670 %} 5671 ins_pipe(iload_mem); 5672 %} 5673 5674 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed) 5675 instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5676 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour)); 5677 ins_cost(MEMORY_REF_COST); 5678 5679 size(4); 5680 format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %} 5681 ins_encode %{ 5682 __ ldsb($mem$$Address, $dst$$Register, 1); 5683 %} 5684 ins_pipe(iload_mask_mem); 5685 %} 5686 5687 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register 5688 instruct loadUS2L(iRegL dst, memory mem) %{ 5689 match(Set dst (ConvI2L (LoadUS mem))); 5690 ins_cost(MEMORY_REF_COST); 5691 5692 size(4); 5693 format %{ "LDUH $mem,$dst\t! ushort/char -> long" %} 5694 ins_encode %{ 5695 __ lduh($mem$$Address, $dst$$Register); 5696 %} 5697 ins_pipe(iload_mem); 5698 %} 5699 5700 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register 5701 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5702 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5703 ins_cost(MEMORY_REF_COST); 5704 5705 size(4); 5706 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %} 5707 ins_encode %{ 5708 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE 5709 %} 5710 ins_pipe(iload_mem); 5711 %} 5712 5713 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register 5714 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{ 5715 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5716 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5717 5718 size(2*4); 5719 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t" 5720 "AND $dst,$mask,$dst" %} 5721 ins_encode %{ 5722 Register Rdst = $dst$$Register; 5723 __ lduh($mem$$Address, Rdst); 5724 __ and3(Rdst, $mask$$constant, Rdst); 5725 %} 5726 ins_pipe(iload_mem); 5727 %} 5728 5729 // Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register 5730 instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{ 5731 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5732 effect(TEMP dst, TEMP tmp); 5733 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5734 5735 size((3+1)*4); // set may use two instructions. 5736 format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t" 5737 "SET $mask,$tmp\n\t" 5738 "AND $dst,$tmp,$dst" %} 5739 ins_encode %{ 5740 Register Rdst = $dst$$Register; 5741 Register Rtmp = $tmp$$Register; 5742 __ lduh($mem$$Address, Rdst); 5743 __ set($mask$$constant, Rtmp); 5744 __ and3(Rdst, Rtmp, Rdst); 5745 %} 5746 ins_pipe(iload_mem); 5747 %} 5748 5749 // Load Integer 5750 instruct loadI(iRegI dst, memory mem) %{ 5751 match(Set dst (LoadI mem)); 5752 ins_cost(MEMORY_REF_COST); 5753 5754 size(4); 5755 format %{ "LDUW $mem,$dst\t! int" %} 5756 ins_encode %{ 5757 __ lduw($mem$$Address, $dst$$Register); 5758 %} 5759 ins_pipe(iload_mem); 5760 %} 5761 5762 // Load Integer to Byte (8 bit signed) 5763 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5764 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour)); 5765 ins_cost(MEMORY_REF_COST); 5766 5767 size(4); 5768 5769 format %{ "LDSB $mem+3,$dst\t! int -> byte" %} 5770 ins_encode %{ 5771 __ ldsb($mem$$Address, $dst$$Register, 3); 5772 %} 5773 ins_pipe(iload_mask_mem); 5774 %} 5775 5776 // Load Integer to Unsigned Byte (8 bit UNsigned) 5777 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{ 5778 match(Set dst (AndI (LoadI mem) mask)); 5779 ins_cost(MEMORY_REF_COST); 5780 5781 size(4); 5782 5783 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %} 5784 ins_encode %{ 5785 __ ldub($mem$$Address, $dst$$Register, 3); 5786 %} 5787 ins_pipe(iload_mask_mem); 5788 %} 5789 5790 // Load Integer to Short (16 bit signed) 5791 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{ 5792 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen)); 5793 ins_cost(MEMORY_REF_COST); 5794 5795 size(4); 5796 5797 format %{ "LDSH $mem+2,$dst\t! int -> short" %} 5798 ins_encode %{ 5799 __ ldsh($mem$$Address, $dst$$Register, 2); 5800 %} 5801 ins_pipe(iload_mask_mem); 5802 %} 5803 5804 // Load Integer to Unsigned Short (16 bit UNsigned) 5805 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{ 5806 match(Set dst (AndI (LoadI mem) mask)); 5807 ins_cost(MEMORY_REF_COST); 5808 5809 size(4); 5810 5811 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %} 5812 ins_encode %{ 5813 __ lduh($mem$$Address, $dst$$Register, 2); 5814 %} 5815 ins_pipe(iload_mask_mem); 5816 %} 5817 5818 // Load Integer into a Long Register 5819 instruct loadI2L(iRegL dst, memory mem) %{ 5820 match(Set dst (ConvI2L (LoadI mem))); 5821 ins_cost(MEMORY_REF_COST); 5822 5823 size(4); 5824 format %{ "LDSW $mem,$dst\t! int -> long" %} 5825 ins_encode %{ 5826 __ ldsw($mem$$Address, $dst$$Register); 5827 %} 5828 ins_pipe(iload_mask_mem); 5829 %} 5830 5831 // Load Integer with mask 0xFF into a Long Register 5832 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5833 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5834 ins_cost(MEMORY_REF_COST); 5835 5836 size(4); 5837 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %} 5838 ins_encode %{ 5839 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE 5840 %} 5841 ins_pipe(iload_mem); 5842 %} 5843 5844 // Load Integer with mask 0xFFFF into a Long Register 5845 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{ 5846 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5847 ins_cost(MEMORY_REF_COST); 5848 5849 size(4); 5850 format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %} 5851 ins_encode %{ 5852 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE 5853 %} 5854 ins_pipe(iload_mem); 5855 %} 5856 5857 // Load Integer with a 13-bit mask into a Long Register 5858 instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{ 5859 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5860 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5861 5862 size(2*4); 5863 format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t" 5864 "AND $dst,$mask,$dst" %} 5865 ins_encode %{ 5866 Register Rdst = $dst$$Register; 5867 __ lduw($mem$$Address, Rdst); 5868 __ and3(Rdst, $mask$$constant, Rdst); 5869 %} 5870 ins_pipe(iload_mem); 5871 %} 5872 5873 // Load Integer with a 32-bit mask into a Long Register 5874 instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{ 5875 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5876 effect(TEMP dst, TEMP tmp); 5877 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5878 5879 size((3+1)*4); // set may use two instructions. 5880 format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t" 5881 "SET $mask,$tmp\n\t" 5882 "AND $dst,$tmp,$dst" %} 5883 ins_encode %{ 5884 Register Rdst = $dst$$Register; 5885 Register Rtmp = $tmp$$Register; 5886 __ lduw($mem$$Address, Rdst); 5887 __ set($mask$$constant, Rtmp); 5888 __ and3(Rdst, Rtmp, Rdst); 5889 %} 5890 ins_pipe(iload_mem); 5891 %} 5892 5893 // Load Unsigned Integer into a Long Register 5894 instruct loadUI2L(iRegL dst, memory mem) %{ 5895 match(Set dst (LoadUI2L mem)); 5896 ins_cost(MEMORY_REF_COST); 5897 5898 size(4); 5899 format %{ "LDUW $mem,$dst\t! uint -> long" %} 5900 ins_encode %{ 5901 __ lduw($mem$$Address, $dst$$Register); 5902 %} 5903 ins_pipe(iload_mem); 5904 %} 5905 5906 // Load Long - aligned 5907 instruct loadL(iRegL dst, memory mem ) %{ 5908 match(Set dst (LoadL mem)); 5909 ins_cost(MEMORY_REF_COST); 5910 5911 size(4); 5912 format %{ "LDX $mem,$dst\t! long" %} 5913 ins_encode %{ 5914 __ ldx($mem$$Address, $dst$$Register); 5915 %} 5916 ins_pipe(iload_mem); 5917 %} 5918 5919 // Load Long - UNaligned 5920 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{ 5921 match(Set dst (LoadL_unaligned mem)); 5922 effect(KILL tmp); 5923 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 5924 size(16); 5925 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n" 5926 "\tLDUW $mem ,$dst\n" 5927 "\tSLLX #32, $dst, $dst\n" 5928 "\tOR $dst, R_O7, $dst" %} 5929 opcode(Assembler::lduw_op3); 5930 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst )); 5931 ins_pipe(iload_mem); 5932 %} 5933 5934 // Load Aligned Packed Byte into a Double Register 5935 instruct loadA8B(regD dst, memory mem) %{ 5936 match(Set dst (Load8B mem)); 5937 ins_cost(MEMORY_REF_COST); 5938 size(4); 5939 format %{ "LDDF $mem,$dst\t! packed8B" %} 5940 opcode(Assembler::lddf_op3); 5941 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5942 ins_pipe(floadD_mem); 5943 %} 5944 5945 // Load Aligned Packed Char into a Double Register 5946 instruct loadA4C(regD dst, memory mem) %{ 5947 match(Set dst (Load4C mem)); 5948 ins_cost(MEMORY_REF_COST); 5949 size(4); 5950 format %{ "LDDF $mem,$dst\t! packed4C" %} 5951 opcode(Assembler::lddf_op3); 5952 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5953 ins_pipe(floadD_mem); 5954 %} 5955 5956 // Load Aligned Packed Short into a Double Register 5957 instruct loadA4S(regD dst, memory mem) %{ 5958 match(Set dst (Load4S mem)); 5959 ins_cost(MEMORY_REF_COST); 5960 size(4); 5961 format %{ "LDDF $mem,$dst\t! packed4S" %} 5962 opcode(Assembler::lddf_op3); 5963 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5964 ins_pipe(floadD_mem); 5965 %} 5966 5967 // Load Aligned Packed Int into a Double Register 5968 instruct loadA2I(regD dst, memory mem) %{ 5969 match(Set dst (Load2I mem)); 5970 ins_cost(MEMORY_REF_COST); 5971 size(4); 5972 format %{ "LDDF $mem,$dst\t! packed2I" %} 5973 opcode(Assembler::lddf_op3); 5974 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5975 ins_pipe(floadD_mem); 5976 %} 5977 5978 // Load Range 5979 instruct loadRange(iRegI dst, memory mem) %{ 5980 match(Set dst (LoadRange mem)); 5981 ins_cost(MEMORY_REF_COST); 5982 5983 size(4); 5984 format %{ "LDUW $mem,$dst\t! range" %} 5985 opcode(Assembler::lduw_op3); 5986 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5987 ins_pipe(iload_mem); 5988 %} 5989 5990 // Load Integer into %f register (for fitos/fitod) 5991 instruct loadI_freg(regF dst, memory mem) %{ 5992 match(Set dst (LoadI mem)); 5993 ins_cost(MEMORY_REF_COST); 5994 size(4); 5995 5996 format %{ "LDF $mem,$dst\t! for fitos/fitod" %} 5997 opcode(Assembler::ldf_op3); 5998 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5999 ins_pipe(floadF_mem); 6000 %} 6001 6002 // Load Pointer 6003 instruct loadP(iRegP dst, memory mem) %{ 6004 match(Set dst (LoadP mem)); 6005 ins_cost(MEMORY_REF_COST); 6006 size(4); 6007 6008 #ifndef _LP64 6009 format %{ "LDUW $mem,$dst\t! ptr" %} 6010 ins_encode %{ 6011 __ lduw($mem$$Address, $dst$$Register); 6012 %} 6013 #else 6014 format %{ "LDX $mem,$dst\t! ptr" %} 6015 ins_encode %{ 6016 __ ldx($mem$$Address, $dst$$Register); 6017 %} 6018 #endif 6019 ins_pipe(iload_mem); 6020 %} 6021 6022 // Load Compressed Pointer 6023 instruct loadN(iRegN dst, memory mem) %{ 6024 match(Set dst (LoadN mem)); 6025 ins_cost(MEMORY_REF_COST); 6026 size(4); 6027 6028 format %{ "LDUW $mem,$dst\t! compressed ptr" %} 6029 ins_encode %{ 6030 __ lduw($mem$$Address, $dst$$Register); 6031 %} 6032 ins_pipe(iload_mem); 6033 %} 6034 6035 // Load Klass Pointer 6036 instruct loadKlass(iRegP dst, memory mem) %{ 6037 match(Set dst (LoadKlass mem)); 6038 ins_cost(MEMORY_REF_COST); 6039 size(4); 6040 6041 #ifndef _LP64 6042 format %{ "LDUW $mem,$dst\t! klass ptr" %} 6043 ins_encode %{ 6044 __ lduw($mem$$Address, $dst$$Register); 6045 %} 6046 #else 6047 format %{ "LDX $mem,$dst\t! klass ptr" %} 6048 ins_encode %{ 6049 __ ldx($mem$$Address, $dst$$Register); 6050 %} 6051 #endif 6052 ins_pipe(iload_mem); 6053 %} 6054 6055 // Load narrow Klass Pointer 6056 instruct loadNKlass(iRegN dst, memory mem) %{ 6057 match(Set dst (LoadNKlass mem)); 6058 ins_cost(MEMORY_REF_COST); 6059 size(4); 6060 6061 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %} 6062 ins_encode %{ 6063 __ lduw($mem$$Address, $dst$$Register); 6064 %} 6065 ins_pipe(iload_mem); 6066 %} 6067 6068 // Load Double 6069 instruct loadD(regD dst, memory mem) %{ 6070 match(Set dst (LoadD mem)); 6071 ins_cost(MEMORY_REF_COST); 6072 6073 size(4); 6074 format %{ "LDDF $mem,$dst" %} 6075 opcode(Assembler::lddf_op3); 6076 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6077 ins_pipe(floadD_mem); 6078 %} 6079 6080 // Load Double - UNaligned 6081 instruct loadD_unaligned(regD_low dst, memory mem ) %{ 6082 match(Set dst (LoadD_unaligned mem)); 6083 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 6084 size(8); 6085 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n" 6086 "\tLDF $mem+4,$dst.lo\t!" %} 6087 opcode(Assembler::ldf_op3); 6088 ins_encode( form3_mem_reg_double_unaligned( mem, dst )); 6089 ins_pipe(iload_mem); 6090 %} 6091 6092 // Load Float 6093 instruct loadF(regF dst, memory mem) %{ 6094 match(Set dst (LoadF mem)); 6095 ins_cost(MEMORY_REF_COST); 6096 6097 size(4); 6098 format %{ "LDF $mem,$dst" %} 6099 opcode(Assembler::ldf_op3); 6100 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6101 ins_pipe(floadF_mem); 6102 %} 6103 6104 // Load Constant 6105 instruct loadConI( iRegI dst, immI src ) %{ 6106 match(Set dst src); 6107 ins_cost(DEFAULT_COST * 3/2); 6108 format %{ "SET $src,$dst" %} 6109 ins_encode( Set32(src, dst) ); 6110 ins_pipe(ialu_hi_lo_reg); 6111 %} 6112 6113 instruct loadConI13( iRegI dst, immI13 src ) %{ 6114 match(Set dst src); 6115 6116 size(4); 6117 format %{ "MOV $src,$dst" %} 6118 ins_encode( Set13( src, dst ) ); 6119 ins_pipe(ialu_imm); 6120 %} 6121 6122 #ifndef _LP64 6123 instruct loadConP(iRegP dst, immP con) %{ 6124 match(Set dst con); 6125 ins_cost(DEFAULT_COST * 3/2); 6126 format %{ "SET $con,$dst\t!ptr" %} 6127 ins_encode %{ 6128 // [RGV] This next line should be generated from ADLC 6129 if (_opnds[1]->constant_is_oop()) { 6130 intptr_t val = $con$$constant; 6131 __ set_oop_constant((jobject) val, $dst$$Register); 6132 } else { // non-oop pointers, e.g. card mark base, heap top 6133 __ set($con$$constant, $dst$$Register); 6134 } 6135 %} 6136 ins_pipe(loadConP); 6137 %} 6138 #else 6139 instruct loadConP_set(iRegP dst, immP_set con) %{ 6140 match(Set dst con); 6141 ins_cost(DEFAULT_COST * 3/2); 6142 format %{ "SET $con,$dst\t! ptr" %} 6143 ins_encode %{ 6144 // [RGV] This next line should be generated from ADLC 6145 if (_opnds[1]->constant_is_oop()) { 6146 intptr_t val = $con$$constant; 6147 __ set_oop_constant((jobject) val, $dst$$Register); 6148 } else { // non-oop pointers, e.g. card mark base, heap top 6149 __ set($con$$constant, $dst$$Register); 6150 } 6151 %} 6152 ins_pipe(loadConP); 6153 %} 6154 6155 instruct loadConP_load(iRegP dst, immP_load con) %{ 6156 match(Set dst con); 6157 ins_cost(MEMORY_REF_COST); 6158 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %} 6159 ins_encode %{ 6160 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 6161 __ ld_ptr($constanttablebase, con_offset, $dst$$Register); 6162 %} 6163 ins_pipe(loadConP); 6164 %} 6165 6166 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{ 6167 match(Set dst con); 6168 ins_cost(DEFAULT_COST * 3/2); 6169 format %{ "SET $con,$dst\t! non-oop ptr" %} 6170 ins_encode %{ 6171 __ set($con$$constant, $dst$$Register); 6172 %} 6173 ins_pipe(loadConP); 6174 %} 6175 #endif // _LP64 6176 6177 instruct loadConP0(iRegP dst, immP0 src) %{ 6178 match(Set dst src); 6179 6180 size(4); 6181 format %{ "CLR $dst\t!ptr" %} 6182 ins_encode %{ 6183 __ clr($dst$$Register); 6184 %} 6185 ins_pipe(ialu_imm); 6186 %} 6187 6188 instruct loadConP_poll(iRegP dst, immP_poll src) %{ 6189 match(Set dst src); 6190 ins_cost(DEFAULT_COST); 6191 format %{ "SET $src,$dst\t!ptr" %} 6192 ins_encode %{ 6193 AddressLiteral polling_page(os::get_polling_page()); 6194 __ sethi(polling_page, reg_to_register_object($dst$$reg)); 6195 %} 6196 ins_pipe(loadConP_poll); 6197 %} 6198 6199 instruct loadConN0(iRegN dst, immN0 src) %{ 6200 match(Set dst src); 6201 6202 size(4); 6203 format %{ "CLR $dst\t! compressed NULL ptr" %} 6204 ins_encode %{ 6205 __ clr($dst$$Register); 6206 %} 6207 ins_pipe(ialu_imm); 6208 %} 6209 6210 instruct loadConN(iRegN dst, immN src) %{ 6211 match(Set dst src); 6212 ins_cost(DEFAULT_COST * 3/2); 6213 format %{ "SET $src,$dst\t! compressed ptr" %} 6214 ins_encode %{ 6215 Register dst = $dst$$Register; 6216 __ set_narrow_oop((jobject)$src$$constant, dst); 6217 %} 6218 ins_pipe(ialu_hi_lo_reg); 6219 %} 6220 6221 // Materialize long value (predicated by immL_cheap). 6222 instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{ 6223 match(Set dst con); 6224 effect(KILL tmp); 6225 ins_cost(DEFAULT_COST * 3); 6226 format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %} 6227 ins_encode %{ 6228 __ set64($con$$constant, $dst$$Register, $tmp$$Register); 6229 %} 6230 ins_pipe(loadConL); 6231 %} 6232 6233 // Load long value from constant table (predicated by immL_expensive). 6234 instruct loadConL_ldx(iRegL dst, immL_expensive con) %{ 6235 match(Set dst con); 6236 ins_cost(MEMORY_REF_COST); 6237 format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %} 6238 ins_encode %{ 6239 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 6240 __ ldx($constanttablebase, con_offset, $dst$$Register); 6241 %} 6242 ins_pipe(loadConL); 6243 %} 6244 6245 instruct loadConL0( iRegL dst, immL0 src ) %{ 6246 match(Set dst src); 6247 ins_cost(DEFAULT_COST); 6248 size(4); 6249 format %{ "CLR $dst\t! long" %} 6250 ins_encode( Set13( src, dst ) ); 6251 ins_pipe(ialu_imm); 6252 %} 6253 6254 instruct loadConL13( iRegL dst, immL13 src ) %{ 6255 match(Set dst src); 6256 ins_cost(DEFAULT_COST * 2); 6257 6258 size(4); 6259 format %{ "MOV $src,$dst\t! long" %} 6260 ins_encode( Set13( src, dst ) ); 6261 ins_pipe(ialu_imm); 6262 %} 6263 6264 instruct loadConF(regF dst, immF con, o7RegI tmp) %{ 6265 match(Set dst con); 6266 effect(KILL tmp); 6267 format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %} 6268 ins_encode %{ 6269 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 6270 __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister); 6271 %} 6272 ins_pipe(loadConFD); 6273 %} 6274 6275 instruct loadConD(regD dst, immD con, o7RegI tmp) %{ 6276 match(Set dst con); 6277 effect(KILL tmp); 6278 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %} 6279 ins_encode %{ 6280 // XXX This is a quick fix for 6833573. 6281 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister); 6282 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 6283 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 6284 %} 6285 ins_pipe(loadConFD); 6286 %} 6287 6288 // Prefetch instructions. 6289 // Must be safe to execute with invalid address (cannot fault). 6290 6291 instruct prefetchr( memory mem ) %{ 6292 match( PrefetchRead mem ); 6293 ins_cost(MEMORY_REF_COST); 6294 size(4); 6295 6296 format %{ "PREFETCH $mem,0\t! Prefetch read-many" %} 6297 opcode(Assembler::prefetch_op3); 6298 ins_encode( form3_mem_prefetch_read( mem ) ); 6299 ins_pipe(iload_mem); 6300 %} 6301 6302 instruct prefetchw( memory mem ) %{ 6303 match( PrefetchWrite mem ); 6304 ins_cost(MEMORY_REF_COST); 6305 size(4); 6306 6307 format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %} 6308 opcode(Assembler::prefetch_op3); 6309 ins_encode( form3_mem_prefetch_write( mem ) ); 6310 ins_pipe(iload_mem); 6311 %} 6312 6313 // Prefetch instructions for allocation. 6314 6315 instruct prefetchAlloc( memory mem ) %{ 6316 predicate(AllocatePrefetchInstr == 0); 6317 match( PrefetchAllocation mem ); 6318 ins_cost(MEMORY_REF_COST); 6319 size(4); 6320 6321 format %{ "PREFETCH $mem,2\t! Prefetch allocation" %} 6322 opcode(Assembler::prefetch_op3); 6323 ins_encode( form3_mem_prefetch_write( mem ) ); 6324 ins_pipe(iload_mem); 6325 %} 6326 6327 // Use BIS instruction to prefetch for allocation. 6328 // Could fault, need space at the end of TLAB. 6329 instruct prefetchAlloc_bis( iRegP dst ) %{ 6330 predicate(AllocatePrefetchInstr == 1); 6331 match( PrefetchAllocation dst ); 6332 ins_cost(MEMORY_REF_COST); 6333 size(4); 6334 6335 format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %} 6336 ins_encode %{ 6337 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 6338 %} 6339 ins_pipe(istore_mem_reg); 6340 %} 6341 6342 // Next code is used for finding next cache line address to prefetch. 6343 #ifndef _LP64 6344 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{ 6345 match(Set dst (CastX2P (AndI (CastP2X src) mask))); 6346 ins_cost(DEFAULT_COST); 6347 size(4); 6348 6349 format %{ "AND $src,$mask,$dst\t! next cache line address" %} 6350 ins_encode %{ 6351 __ and3($src$$Register, $mask$$constant, $dst$$Register); 6352 %} 6353 ins_pipe(ialu_reg_imm); 6354 %} 6355 #else 6356 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{ 6357 match(Set dst (CastX2P (AndL (CastP2X src) mask))); 6358 ins_cost(DEFAULT_COST); 6359 size(4); 6360 6361 format %{ "AND $src,$mask,$dst\t! next cache line address" %} 6362 ins_encode %{ 6363 __ and3($src$$Register, $mask$$constant, $dst$$Register); 6364 %} 6365 ins_pipe(ialu_reg_imm); 6366 %} 6367 #endif 6368 6369 //----------Store Instructions------------------------------------------------- 6370 // Store Byte 6371 instruct storeB(memory mem, iRegI src) %{ 6372 match(Set mem (StoreB mem src)); 6373 ins_cost(MEMORY_REF_COST); 6374 6375 size(4); 6376 format %{ "STB $src,$mem\t! byte" %} 6377 opcode(Assembler::stb_op3); 6378 ins_encode(simple_form3_mem_reg( mem, src ) ); 6379 ins_pipe(istore_mem_reg); 6380 %} 6381 6382 instruct storeB0(memory mem, immI0 src) %{ 6383 match(Set mem (StoreB mem src)); 6384 ins_cost(MEMORY_REF_COST); 6385 6386 size(4); 6387 format %{ "STB $src,$mem\t! byte" %} 6388 opcode(Assembler::stb_op3); 6389 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6390 ins_pipe(istore_mem_zero); 6391 %} 6392 6393 instruct storeCM0(memory mem, immI0 src) %{ 6394 match(Set mem (StoreCM mem src)); 6395 ins_cost(MEMORY_REF_COST); 6396 6397 size(4); 6398 format %{ "STB $src,$mem\t! CMS card-mark byte 0" %} 6399 opcode(Assembler::stb_op3); 6400 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6401 ins_pipe(istore_mem_zero); 6402 %} 6403 6404 // Store Char/Short 6405 instruct storeC(memory mem, iRegI src) %{ 6406 match(Set mem (StoreC mem src)); 6407 ins_cost(MEMORY_REF_COST); 6408 6409 size(4); 6410 format %{ "STH $src,$mem\t! short" %} 6411 opcode(Assembler::sth_op3); 6412 ins_encode(simple_form3_mem_reg( mem, src ) ); 6413 ins_pipe(istore_mem_reg); 6414 %} 6415 6416 instruct storeC0(memory mem, immI0 src) %{ 6417 match(Set mem (StoreC mem src)); 6418 ins_cost(MEMORY_REF_COST); 6419 6420 size(4); 6421 format %{ "STH $src,$mem\t! short" %} 6422 opcode(Assembler::sth_op3); 6423 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6424 ins_pipe(istore_mem_zero); 6425 %} 6426 6427 // Store Integer 6428 instruct storeI(memory mem, iRegI src) %{ 6429 match(Set mem (StoreI mem src)); 6430 ins_cost(MEMORY_REF_COST); 6431 6432 size(4); 6433 format %{ "STW $src,$mem" %} 6434 opcode(Assembler::stw_op3); 6435 ins_encode(simple_form3_mem_reg( mem, src ) ); 6436 ins_pipe(istore_mem_reg); 6437 %} 6438 6439 // Store Long 6440 instruct storeL(memory mem, iRegL src) %{ 6441 match(Set mem (StoreL mem src)); 6442 ins_cost(MEMORY_REF_COST); 6443 size(4); 6444 format %{ "STX $src,$mem\t! long" %} 6445 opcode(Assembler::stx_op3); 6446 ins_encode(simple_form3_mem_reg( mem, src ) ); 6447 ins_pipe(istore_mem_reg); 6448 %} 6449 6450 instruct storeI0(memory mem, immI0 src) %{ 6451 match(Set mem (StoreI mem src)); 6452 ins_cost(MEMORY_REF_COST); 6453 6454 size(4); 6455 format %{ "STW $src,$mem" %} 6456 opcode(Assembler::stw_op3); 6457 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6458 ins_pipe(istore_mem_zero); 6459 %} 6460 6461 instruct storeL0(memory mem, immL0 src) %{ 6462 match(Set mem (StoreL mem src)); 6463 ins_cost(MEMORY_REF_COST); 6464 6465 size(4); 6466 format %{ "STX $src,$mem" %} 6467 opcode(Assembler::stx_op3); 6468 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6469 ins_pipe(istore_mem_zero); 6470 %} 6471 6472 // Store Integer from float register (used after fstoi) 6473 instruct storeI_Freg(memory mem, regF src) %{ 6474 match(Set mem (StoreI mem src)); 6475 ins_cost(MEMORY_REF_COST); 6476 6477 size(4); 6478 format %{ "STF $src,$mem\t! after fstoi/fdtoi" %} 6479 opcode(Assembler::stf_op3); 6480 ins_encode(simple_form3_mem_reg( mem, src ) ); 6481 ins_pipe(fstoreF_mem_reg); 6482 %} 6483 6484 // Store Pointer 6485 instruct storeP(memory dst, sp_ptr_RegP src) %{ 6486 match(Set dst (StoreP dst src)); 6487 ins_cost(MEMORY_REF_COST); 6488 size(4); 6489 6490 #ifndef _LP64 6491 format %{ "STW $src,$dst\t! ptr" %} 6492 opcode(Assembler::stw_op3, 0, REGP_OP); 6493 #else 6494 format %{ "STX $src,$dst\t! ptr" %} 6495 opcode(Assembler::stx_op3, 0, REGP_OP); 6496 #endif 6497 ins_encode( form3_mem_reg( dst, src ) ); 6498 ins_pipe(istore_mem_spORreg); 6499 %} 6500 6501 instruct storeP0(memory dst, immP0 src) %{ 6502 match(Set dst (StoreP dst src)); 6503 ins_cost(MEMORY_REF_COST); 6504 size(4); 6505 6506 #ifndef _LP64 6507 format %{ "STW $src,$dst\t! ptr" %} 6508 opcode(Assembler::stw_op3, 0, REGP_OP); 6509 #else 6510 format %{ "STX $src,$dst\t! ptr" %} 6511 opcode(Assembler::stx_op3, 0, REGP_OP); 6512 #endif 6513 ins_encode( form3_mem_reg( dst, R_G0 ) ); 6514 ins_pipe(istore_mem_zero); 6515 %} 6516 6517 // Store Compressed Pointer 6518 instruct storeN(memory dst, iRegN src) %{ 6519 match(Set dst (StoreN dst src)); 6520 ins_cost(MEMORY_REF_COST); 6521 size(4); 6522 6523 format %{ "STW $src,$dst\t! compressed ptr" %} 6524 ins_encode %{ 6525 Register base = as_Register($dst$$base); 6526 Register index = as_Register($dst$$index); 6527 Register src = $src$$Register; 6528 if (index != G0) { 6529 __ stw(src, base, index); 6530 } else { 6531 __ stw(src, base, $dst$$disp); 6532 } 6533 %} 6534 ins_pipe(istore_mem_spORreg); 6535 %} 6536 6537 instruct storeN0(memory dst, immN0 src) %{ 6538 match(Set dst (StoreN dst src)); 6539 ins_cost(MEMORY_REF_COST); 6540 size(4); 6541 6542 format %{ "STW $src,$dst\t! compressed ptr" %} 6543 ins_encode %{ 6544 Register base = as_Register($dst$$base); 6545 Register index = as_Register($dst$$index); 6546 if (index != G0) { 6547 __ stw(0, base, index); 6548 } else { 6549 __ stw(0, base, $dst$$disp); 6550 } 6551 %} 6552 ins_pipe(istore_mem_zero); 6553 %} 6554 6555 // Store Double 6556 instruct storeD( memory mem, regD src) %{ 6557 match(Set mem (StoreD mem src)); 6558 ins_cost(MEMORY_REF_COST); 6559 6560 size(4); 6561 format %{ "STDF $src,$mem" %} 6562 opcode(Assembler::stdf_op3); 6563 ins_encode(simple_form3_mem_reg( mem, src ) ); 6564 ins_pipe(fstoreD_mem_reg); 6565 %} 6566 6567 instruct storeD0( memory mem, immD0 src) %{ 6568 match(Set mem (StoreD mem src)); 6569 ins_cost(MEMORY_REF_COST); 6570 6571 size(4); 6572 format %{ "STX $src,$mem" %} 6573 opcode(Assembler::stx_op3); 6574 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6575 ins_pipe(fstoreD_mem_zero); 6576 %} 6577 6578 // Store Float 6579 instruct storeF( memory mem, regF src) %{ 6580 match(Set mem (StoreF mem src)); 6581 ins_cost(MEMORY_REF_COST); 6582 6583 size(4); 6584 format %{ "STF $src,$mem" %} 6585 opcode(Assembler::stf_op3); 6586 ins_encode(simple_form3_mem_reg( mem, src ) ); 6587 ins_pipe(fstoreF_mem_reg); 6588 %} 6589 6590 instruct storeF0( memory mem, immF0 src) %{ 6591 match(Set mem (StoreF mem src)); 6592 ins_cost(MEMORY_REF_COST); 6593 6594 size(4); 6595 format %{ "STW $src,$mem\t! storeF0" %} 6596 opcode(Assembler::stw_op3); 6597 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6598 ins_pipe(fstoreF_mem_zero); 6599 %} 6600 6601 // Store Aligned Packed Bytes in Double register to memory 6602 instruct storeA8B(memory mem, regD src) %{ 6603 match(Set mem (Store8B mem src)); 6604 ins_cost(MEMORY_REF_COST); 6605 size(4); 6606 format %{ "STDF $src,$mem\t! packed8B" %} 6607 opcode(Assembler::stdf_op3); 6608 ins_encode(simple_form3_mem_reg( mem, src ) ); 6609 ins_pipe(fstoreD_mem_reg); 6610 %} 6611 6612 // Convert oop pointer into compressed form 6613 instruct encodeHeapOop(iRegN dst, iRegP src) %{ 6614 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull); 6615 match(Set dst (EncodeP src)); 6616 format %{ "encode_heap_oop $src, $dst" %} 6617 ins_encode %{ 6618 __ encode_heap_oop($src$$Register, $dst$$Register); 6619 %} 6620 ins_pipe(ialu_reg); 6621 %} 6622 6623 instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{ 6624 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull); 6625 match(Set dst (EncodeP src)); 6626 format %{ "encode_heap_oop_not_null $src, $dst" %} 6627 ins_encode %{ 6628 __ encode_heap_oop_not_null($src$$Register, $dst$$Register); 6629 %} 6630 ins_pipe(ialu_reg); 6631 %} 6632 6633 instruct decodeHeapOop(iRegP dst, iRegN src) %{ 6634 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull && 6635 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant); 6636 match(Set dst (DecodeN src)); 6637 format %{ "decode_heap_oop $src, $dst" %} 6638 ins_encode %{ 6639 __ decode_heap_oop($src$$Register, $dst$$Register); 6640 %} 6641 ins_pipe(ialu_reg); 6642 %} 6643 6644 instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{ 6645 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull || 6646 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant); 6647 match(Set dst (DecodeN src)); 6648 format %{ "decode_heap_oop_not_null $src, $dst" %} 6649 ins_encode %{ 6650 __ decode_heap_oop_not_null($src$$Register, $dst$$Register); 6651 %} 6652 ins_pipe(ialu_reg); 6653 %} 6654 6655 6656 // Store Zero into Aligned Packed Bytes 6657 instruct storeA8B0(memory mem, immI0 zero) %{ 6658 match(Set mem (Store8B mem zero)); 6659 ins_cost(MEMORY_REF_COST); 6660 size(4); 6661 format %{ "STX $zero,$mem\t! packed8B" %} 6662 opcode(Assembler::stx_op3); 6663 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6664 ins_pipe(fstoreD_mem_zero); 6665 %} 6666 6667 // Store Aligned Packed Chars/Shorts in Double register to memory 6668 instruct storeA4C(memory mem, regD src) %{ 6669 match(Set mem (Store4C mem src)); 6670 ins_cost(MEMORY_REF_COST); 6671 size(4); 6672 format %{ "STDF $src,$mem\t! packed4C" %} 6673 opcode(Assembler::stdf_op3); 6674 ins_encode(simple_form3_mem_reg( mem, src ) ); 6675 ins_pipe(fstoreD_mem_reg); 6676 %} 6677 6678 // Store Zero into Aligned Packed Chars/Shorts 6679 instruct storeA4C0(memory mem, immI0 zero) %{ 6680 match(Set mem (Store4C mem (Replicate4C zero))); 6681 ins_cost(MEMORY_REF_COST); 6682 size(4); 6683 format %{ "STX $zero,$mem\t! packed4C" %} 6684 opcode(Assembler::stx_op3); 6685 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6686 ins_pipe(fstoreD_mem_zero); 6687 %} 6688 6689 // Store Aligned Packed Ints in Double register to memory 6690 instruct storeA2I(memory mem, regD src) %{ 6691 match(Set mem (Store2I mem src)); 6692 ins_cost(MEMORY_REF_COST); 6693 size(4); 6694 format %{ "STDF $src,$mem\t! packed2I" %} 6695 opcode(Assembler::stdf_op3); 6696 ins_encode(simple_form3_mem_reg( mem, src ) ); 6697 ins_pipe(fstoreD_mem_reg); 6698 %} 6699 6700 // Store Zero into Aligned Packed Ints 6701 instruct storeA2I0(memory mem, immI0 zero) %{ 6702 match(Set mem (Store2I mem zero)); 6703 ins_cost(MEMORY_REF_COST); 6704 size(4); 6705 format %{ "STX $zero,$mem\t! packed2I" %} 6706 opcode(Assembler::stx_op3); 6707 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6708 ins_pipe(fstoreD_mem_zero); 6709 %} 6710 6711 6712 //----------MemBar Instructions----------------------------------------------- 6713 // Memory barrier flavors 6714 6715 instruct membar_acquire() %{ 6716 match(MemBarAcquire); 6717 ins_cost(4*MEMORY_REF_COST); 6718 6719 size(0); 6720 format %{ "MEMBAR-acquire" %} 6721 ins_encode( enc_membar_acquire ); 6722 ins_pipe(long_memory_op); 6723 %} 6724 6725 instruct membar_acquire_lock() %{ 6726 match(MemBarAcquireLock); 6727 ins_cost(0); 6728 6729 size(0); 6730 format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %} 6731 ins_encode( ); 6732 ins_pipe(empty); 6733 %} 6734 6735 instruct membar_release() %{ 6736 match(MemBarRelease); 6737 ins_cost(4*MEMORY_REF_COST); 6738 6739 size(0); 6740 format %{ "MEMBAR-release" %} 6741 ins_encode( enc_membar_release ); 6742 ins_pipe(long_memory_op); 6743 %} 6744 6745 instruct membar_release_lock() %{ 6746 match(MemBarReleaseLock); 6747 ins_cost(0); 6748 6749 size(0); 6750 format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %} 6751 ins_encode( ); 6752 ins_pipe(empty); 6753 %} 6754 6755 instruct membar_volatile() %{ 6756 match(MemBarVolatile); 6757 ins_cost(4*MEMORY_REF_COST); 6758 6759 size(4); 6760 format %{ "MEMBAR-volatile" %} 6761 ins_encode( enc_membar_volatile ); 6762 ins_pipe(long_memory_op); 6763 %} 6764 6765 instruct unnecessary_membar_volatile() %{ 6766 match(MemBarVolatile); 6767 predicate(Matcher::post_store_load_barrier(n)); 6768 ins_cost(0); 6769 6770 size(0); 6771 format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %} 6772 ins_encode( ); 6773 ins_pipe(empty); 6774 %} 6775 6776 instruct unnecessary_membar_storestore() %{ 6777 match(MemBarStoreStore); 6778 ins_cost(0); 6779 6780 size(0); 6781 format %{ "!MEMBAR-storestore (unnecessary so empty encoding)" %} 6782 ins_encode( ); 6783 ins_pipe(empty); 6784 %} 6785 6786 //----------Register Move Instructions----------------------------------------- 6787 instruct roundDouble_nop(regD dst) %{ 6788 match(Set dst (RoundDouble dst)); 6789 ins_cost(0); 6790 // SPARC results are already "rounded" (i.e., normal-format IEEE) 6791 ins_encode( ); 6792 ins_pipe(empty); 6793 %} 6794 6795 6796 instruct roundFloat_nop(regF dst) %{ 6797 match(Set dst (RoundFloat dst)); 6798 ins_cost(0); 6799 // SPARC results are already "rounded" (i.e., normal-format IEEE) 6800 ins_encode( ); 6801 ins_pipe(empty); 6802 %} 6803 6804 6805 // Cast Index to Pointer for unsafe natives 6806 instruct castX2P(iRegX src, iRegP dst) %{ 6807 match(Set dst (CastX2P src)); 6808 6809 format %{ "MOV $src,$dst\t! IntX->Ptr" %} 6810 ins_encode( form3_g0_rs2_rd_move( src, dst ) ); 6811 ins_pipe(ialu_reg); 6812 %} 6813 6814 // Cast Pointer to Index for unsafe natives 6815 instruct castP2X(iRegP src, iRegX dst) %{ 6816 match(Set dst (CastP2X src)); 6817 6818 format %{ "MOV $src,$dst\t! Ptr->IntX" %} 6819 ins_encode( form3_g0_rs2_rd_move( src, dst ) ); 6820 ins_pipe(ialu_reg); 6821 %} 6822 6823 instruct stfSSD(stackSlotD stkSlot, regD src) %{ 6824 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6825 match(Set stkSlot src); // chain rule 6826 ins_cost(MEMORY_REF_COST); 6827 format %{ "STDF $src,$stkSlot\t!stk" %} 6828 opcode(Assembler::stdf_op3); 6829 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6830 ins_pipe(fstoreD_stk_reg); 6831 %} 6832 6833 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{ 6834 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6835 match(Set dst stkSlot); // chain rule 6836 ins_cost(MEMORY_REF_COST); 6837 format %{ "LDDF $stkSlot,$dst\t!stk" %} 6838 opcode(Assembler::lddf_op3); 6839 ins_encode(simple_form3_mem_reg(stkSlot, dst)); 6840 ins_pipe(floadD_stk); 6841 %} 6842 6843 instruct stfSSF(stackSlotF stkSlot, regF src) %{ 6844 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6845 match(Set stkSlot src); // chain rule 6846 ins_cost(MEMORY_REF_COST); 6847 format %{ "STF $src,$stkSlot\t!stk" %} 6848 opcode(Assembler::stf_op3); 6849 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6850 ins_pipe(fstoreF_stk_reg); 6851 %} 6852 6853 //----------Conditional Move--------------------------------------------------- 6854 // Conditional move 6855 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{ 6856 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6857 ins_cost(150); 6858 format %{ "MOV$cmp $pcc,$src,$dst" %} 6859 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6860 ins_pipe(ialu_reg); 6861 %} 6862 6863 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{ 6864 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6865 ins_cost(140); 6866 format %{ "MOV$cmp $pcc,$src,$dst" %} 6867 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6868 ins_pipe(ialu_imm); 6869 %} 6870 6871 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{ 6872 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6873 ins_cost(150); 6874 size(4); 6875 format %{ "MOV$cmp $icc,$src,$dst" %} 6876 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6877 ins_pipe(ialu_reg); 6878 %} 6879 6880 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{ 6881 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6882 ins_cost(140); 6883 size(4); 6884 format %{ "MOV$cmp $icc,$src,$dst" %} 6885 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6886 ins_pipe(ialu_imm); 6887 %} 6888 6889 instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{ 6890 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6891 ins_cost(150); 6892 size(4); 6893 format %{ "MOV$cmp $icc,$src,$dst" %} 6894 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6895 ins_pipe(ialu_reg); 6896 %} 6897 6898 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{ 6899 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6900 ins_cost(140); 6901 size(4); 6902 format %{ "MOV$cmp $icc,$src,$dst" %} 6903 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6904 ins_pipe(ialu_imm); 6905 %} 6906 6907 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{ 6908 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6909 ins_cost(150); 6910 size(4); 6911 format %{ "MOV$cmp $fcc,$src,$dst" %} 6912 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6913 ins_pipe(ialu_reg); 6914 %} 6915 6916 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{ 6917 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6918 ins_cost(140); 6919 size(4); 6920 format %{ "MOV$cmp $fcc,$src,$dst" %} 6921 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 6922 ins_pipe(ialu_imm); 6923 %} 6924 6925 // Conditional move for RegN. Only cmov(reg,reg). 6926 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{ 6927 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src))); 6928 ins_cost(150); 6929 format %{ "MOV$cmp $pcc,$src,$dst" %} 6930 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6931 ins_pipe(ialu_reg); 6932 %} 6933 6934 // This instruction also works with CmpN so we don't need cmovNN_reg. 6935 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{ 6936 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6937 ins_cost(150); 6938 size(4); 6939 format %{ "MOV$cmp $icc,$src,$dst" %} 6940 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6941 ins_pipe(ialu_reg); 6942 %} 6943 6944 // This instruction also works with CmpN so we don't need cmovNN_reg. 6945 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{ 6946 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6947 ins_cost(150); 6948 size(4); 6949 format %{ "MOV$cmp $icc,$src,$dst" %} 6950 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6951 ins_pipe(ialu_reg); 6952 %} 6953 6954 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{ 6955 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src))); 6956 ins_cost(150); 6957 size(4); 6958 format %{ "MOV$cmp $fcc,$src,$dst" %} 6959 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6960 ins_pipe(ialu_reg); 6961 %} 6962 6963 // Conditional move 6964 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{ 6965 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6966 ins_cost(150); 6967 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6968 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6969 ins_pipe(ialu_reg); 6970 %} 6971 6972 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{ 6973 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6974 ins_cost(140); 6975 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6976 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6977 ins_pipe(ialu_imm); 6978 %} 6979 6980 // This instruction also works with CmpN so we don't need cmovPN_reg. 6981 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{ 6982 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6983 ins_cost(150); 6984 6985 size(4); 6986 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6987 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6988 ins_pipe(ialu_reg); 6989 %} 6990 6991 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{ 6992 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6993 ins_cost(150); 6994 6995 size(4); 6996 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6997 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6998 ins_pipe(ialu_reg); 6999 %} 7000 7001 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{ 7002 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 7003 ins_cost(140); 7004 7005 size(4); 7006 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 7007 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 7008 ins_pipe(ialu_imm); 7009 %} 7010 7011 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{ 7012 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 7013 ins_cost(140); 7014 7015 size(4); 7016 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 7017 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 7018 ins_pipe(ialu_imm); 7019 %} 7020 7021 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{ 7022 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 7023 ins_cost(150); 7024 size(4); 7025 format %{ "MOV$cmp $fcc,$src,$dst" %} 7026 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 7027 ins_pipe(ialu_imm); 7028 %} 7029 7030 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{ 7031 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 7032 ins_cost(140); 7033 size(4); 7034 format %{ "MOV$cmp $fcc,$src,$dst" %} 7035 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 7036 ins_pipe(ialu_imm); 7037 %} 7038 7039 // Conditional move 7040 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{ 7041 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src))); 7042 ins_cost(150); 7043 opcode(0x101); 7044 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 7045 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7046 ins_pipe(int_conditional_float_move); 7047 %} 7048 7049 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{ 7050 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 7051 ins_cost(150); 7052 7053 size(4); 7054 format %{ "FMOVS$cmp $icc,$src,$dst" %} 7055 opcode(0x101); 7056 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7057 ins_pipe(int_conditional_float_move); 7058 %} 7059 7060 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{ 7061 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 7062 ins_cost(150); 7063 7064 size(4); 7065 format %{ "FMOVS$cmp $icc,$src,$dst" %} 7066 opcode(0x101); 7067 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7068 ins_pipe(int_conditional_float_move); 7069 %} 7070 7071 // Conditional move, 7072 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{ 7073 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src))); 7074 ins_cost(150); 7075 size(4); 7076 format %{ "FMOVF$cmp $fcc,$src,$dst" %} 7077 opcode(0x1); 7078 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 7079 ins_pipe(int_conditional_double_move); 7080 %} 7081 7082 // Conditional move 7083 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{ 7084 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src))); 7085 ins_cost(150); 7086 size(4); 7087 opcode(0x102); 7088 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 7089 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7090 ins_pipe(int_conditional_double_move); 7091 %} 7092 7093 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{ 7094 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 7095 ins_cost(150); 7096 7097 size(4); 7098 format %{ "FMOVD$cmp $icc,$src,$dst" %} 7099 opcode(0x102); 7100 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7101 ins_pipe(int_conditional_double_move); 7102 %} 7103 7104 instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{ 7105 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 7106 ins_cost(150); 7107 7108 size(4); 7109 format %{ "FMOVD$cmp $icc,$src,$dst" %} 7110 opcode(0x102); 7111 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7112 ins_pipe(int_conditional_double_move); 7113 %} 7114 7115 // Conditional move, 7116 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{ 7117 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src))); 7118 ins_cost(150); 7119 size(4); 7120 format %{ "FMOVD$cmp $fcc,$src,$dst" %} 7121 opcode(0x2); 7122 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 7123 ins_pipe(int_conditional_double_move); 7124 %} 7125 7126 // Conditional move 7127 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{ 7128 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 7129 ins_cost(150); 7130 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 7131 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7132 ins_pipe(ialu_reg); 7133 %} 7134 7135 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{ 7136 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 7137 ins_cost(140); 7138 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 7139 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 7140 ins_pipe(ialu_imm); 7141 %} 7142 7143 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{ 7144 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 7145 ins_cost(150); 7146 7147 size(4); 7148 format %{ "MOV$cmp $icc,$src,$dst\t! long" %} 7149 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 7150 ins_pipe(ialu_reg); 7151 %} 7152 7153 7154 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{ 7155 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 7156 ins_cost(150); 7157 7158 size(4); 7159 format %{ "MOV$cmp $icc,$src,$dst\t! long" %} 7160 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 7161 ins_pipe(ialu_reg); 7162 %} 7163 7164 7165 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{ 7166 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src))); 7167 ins_cost(150); 7168 7169 size(4); 7170 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %} 7171 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 7172 ins_pipe(ialu_reg); 7173 %} 7174 7175 7176 7177 //----------OS and Locking Instructions---------------------------------------- 7178 7179 // This name is KNOWN by the ADLC and cannot be changed. 7180 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type 7181 // for this guy. 7182 instruct tlsLoadP(g2RegP dst) %{ 7183 match(Set dst (ThreadLocal)); 7184 7185 size(0); 7186 ins_cost(0); 7187 format %{ "# TLS is in G2" %} 7188 ins_encode( /*empty encoding*/ ); 7189 ins_pipe(ialu_none); 7190 %} 7191 7192 instruct checkCastPP( iRegP dst ) %{ 7193 match(Set dst (CheckCastPP dst)); 7194 7195 size(0); 7196 format %{ "# checkcastPP of $dst" %} 7197 ins_encode( /*empty encoding*/ ); 7198 ins_pipe(empty); 7199 %} 7200 7201 7202 instruct castPP( iRegP dst ) %{ 7203 match(Set dst (CastPP dst)); 7204 format %{ "# castPP of $dst" %} 7205 ins_encode( /*empty encoding*/ ); 7206 ins_pipe(empty); 7207 %} 7208 7209 instruct castII( iRegI dst ) %{ 7210 match(Set dst (CastII dst)); 7211 format %{ "# castII of $dst" %} 7212 ins_encode( /*empty encoding*/ ); 7213 ins_cost(0); 7214 ins_pipe(empty); 7215 %} 7216 7217 //----------Arithmetic Instructions-------------------------------------------- 7218 // Addition Instructions 7219 // Register Addition 7220 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7221 match(Set dst (AddI src1 src2)); 7222 7223 size(4); 7224 format %{ "ADD $src1,$src2,$dst" %} 7225 ins_encode %{ 7226 __ add($src1$$Register, $src2$$Register, $dst$$Register); 7227 %} 7228 ins_pipe(ialu_reg_reg); 7229 %} 7230 7231 // Immediate Addition 7232 instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7233 match(Set dst (AddI src1 src2)); 7234 7235 size(4); 7236 format %{ "ADD $src1,$src2,$dst" %} 7237 opcode(Assembler::add_op3, Assembler::arith_op); 7238 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7239 ins_pipe(ialu_reg_imm); 7240 %} 7241 7242 // Pointer Register Addition 7243 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{ 7244 match(Set dst (AddP src1 src2)); 7245 7246 size(4); 7247 format %{ "ADD $src1,$src2,$dst" %} 7248 opcode(Assembler::add_op3, Assembler::arith_op); 7249 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7250 ins_pipe(ialu_reg_reg); 7251 %} 7252 7253 // Pointer Immediate Addition 7254 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{ 7255 match(Set dst (AddP src1 src2)); 7256 7257 size(4); 7258 format %{ "ADD $src1,$src2,$dst" %} 7259 opcode(Assembler::add_op3, Assembler::arith_op); 7260 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7261 ins_pipe(ialu_reg_imm); 7262 %} 7263 7264 // Long Addition 7265 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7266 match(Set dst (AddL src1 src2)); 7267 7268 size(4); 7269 format %{ "ADD $src1,$src2,$dst\t! long" %} 7270 opcode(Assembler::add_op3, Assembler::arith_op); 7271 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7272 ins_pipe(ialu_reg_reg); 7273 %} 7274 7275 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7276 match(Set dst (AddL src1 con)); 7277 7278 size(4); 7279 format %{ "ADD $src1,$con,$dst" %} 7280 opcode(Assembler::add_op3, Assembler::arith_op); 7281 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7282 ins_pipe(ialu_reg_imm); 7283 %} 7284 7285 //----------Conditional_store-------------------------------------------------- 7286 // Conditional-store of the updated heap-top. 7287 // Used during allocation of the shared heap. 7288 // Sets flags (EQ) on success. Implemented with a CASA on Sparc. 7289 7290 // LoadP-locked. Same as a regular pointer load when used with a compare-swap 7291 instruct loadPLocked(iRegP dst, memory mem) %{ 7292 match(Set dst (LoadPLocked mem)); 7293 ins_cost(MEMORY_REF_COST); 7294 7295 #ifndef _LP64 7296 size(4); 7297 format %{ "LDUW $mem,$dst\t! ptr" %} 7298 opcode(Assembler::lduw_op3, 0, REGP_OP); 7299 #else 7300 format %{ "LDX $mem,$dst\t! ptr" %} 7301 opcode(Assembler::ldx_op3, 0, REGP_OP); 7302 #endif 7303 ins_encode( form3_mem_reg( mem, dst ) ); 7304 ins_pipe(iload_mem); 7305 %} 7306 7307 // LoadL-locked. Same as a regular long load when used with a compare-swap 7308 instruct loadLLocked(iRegL dst, memory mem) %{ 7309 match(Set dst (LoadLLocked mem)); 7310 ins_cost(MEMORY_REF_COST); 7311 size(4); 7312 format %{ "LDX $mem,$dst\t! long" %} 7313 opcode(Assembler::ldx_op3); 7314 ins_encode(simple_form3_mem_reg( mem, dst ) ); 7315 ins_pipe(iload_mem); 7316 %} 7317 7318 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{ 7319 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval))); 7320 effect( KILL newval ); 7321 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t" 7322 "CMP R_G3,$oldval\t\t! See if we made progress" %} 7323 ins_encode( enc_cas(heap_top_ptr,oldval,newval) ); 7324 ins_pipe( long_memory_op ); 7325 %} 7326 7327 // Conditional-store of an int value. 7328 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{ 7329 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval))); 7330 effect( KILL newval ); 7331 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 7332 "CMP $oldval,$newval\t\t! See if we made progress" %} 7333 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 7334 ins_pipe( long_memory_op ); 7335 %} 7336 7337 // Conditional-store of a long value. 7338 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{ 7339 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval))); 7340 effect( KILL newval ); 7341 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 7342 "CMP $oldval,$newval\t\t! See if we made progress" %} 7343 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 7344 ins_pipe( long_memory_op ); 7345 %} 7346 7347 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them 7348 7349 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7350 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval))); 7351 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7352 format %{ 7353 "MOV $newval,O7\n\t" 7354 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7355 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7356 "MOV 1,$res\n\t" 7357 "MOVne xcc,R_G0,$res" 7358 %} 7359 ins_encode( enc_casx(mem_ptr, oldval, newval), 7360 enc_lflags_ne_to_boolean(res) ); 7361 ins_pipe( long_memory_op ); 7362 %} 7363 7364 7365 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7366 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval))); 7367 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7368 format %{ 7369 "MOV $newval,O7\n\t" 7370 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7371 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7372 "MOV 1,$res\n\t" 7373 "MOVne icc,R_G0,$res" 7374 %} 7375 ins_encode( enc_casi(mem_ptr, oldval, newval), 7376 enc_iflags_ne_to_boolean(res) ); 7377 ins_pipe( long_memory_op ); 7378 %} 7379 7380 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7381 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); 7382 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7383 format %{ 7384 "MOV $newval,O7\n\t" 7385 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7386 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7387 "MOV 1,$res\n\t" 7388 "MOVne xcc,R_G0,$res" 7389 %} 7390 #ifdef _LP64 7391 ins_encode( enc_casx(mem_ptr, oldval, newval), 7392 enc_lflags_ne_to_boolean(res) ); 7393 #else 7394 ins_encode( enc_casi(mem_ptr, oldval, newval), 7395 enc_iflags_ne_to_boolean(res) ); 7396 #endif 7397 ins_pipe( long_memory_op ); 7398 %} 7399 7400 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7401 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); 7402 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7403 format %{ 7404 "MOV $newval,O7\n\t" 7405 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7406 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7407 "MOV 1,$res\n\t" 7408 "MOVne icc,R_G0,$res" 7409 %} 7410 ins_encode( enc_casi(mem_ptr, oldval, newval), 7411 enc_iflags_ne_to_boolean(res) ); 7412 ins_pipe( long_memory_op ); 7413 %} 7414 7415 //--------------------- 7416 // Subtraction Instructions 7417 // Register Subtraction 7418 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7419 match(Set dst (SubI src1 src2)); 7420 7421 size(4); 7422 format %{ "SUB $src1,$src2,$dst" %} 7423 opcode(Assembler::sub_op3, Assembler::arith_op); 7424 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7425 ins_pipe(ialu_reg_reg); 7426 %} 7427 7428 // Immediate Subtraction 7429 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7430 match(Set dst (SubI src1 src2)); 7431 7432 size(4); 7433 format %{ "SUB $src1,$src2,$dst" %} 7434 opcode(Assembler::sub_op3, Assembler::arith_op); 7435 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7436 ins_pipe(ialu_reg_imm); 7437 %} 7438 7439 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 7440 match(Set dst (SubI zero src2)); 7441 7442 size(4); 7443 format %{ "NEG $src2,$dst" %} 7444 opcode(Assembler::sub_op3, Assembler::arith_op); 7445 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7446 ins_pipe(ialu_zero_reg); 7447 %} 7448 7449 // Long subtraction 7450 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7451 match(Set dst (SubL src1 src2)); 7452 7453 size(4); 7454 format %{ "SUB $src1,$src2,$dst\t! long" %} 7455 opcode(Assembler::sub_op3, Assembler::arith_op); 7456 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7457 ins_pipe(ialu_reg_reg); 7458 %} 7459 7460 // Immediate Subtraction 7461 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7462 match(Set dst (SubL src1 con)); 7463 7464 size(4); 7465 format %{ "SUB $src1,$con,$dst\t! long" %} 7466 opcode(Assembler::sub_op3, Assembler::arith_op); 7467 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7468 ins_pipe(ialu_reg_imm); 7469 %} 7470 7471 // Long negation 7472 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{ 7473 match(Set dst (SubL zero src2)); 7474 7475 size(4); 7476 format %{ "NEG $src2,$dst\t! long" %} 7477 opcode(Assembler::sub_op3, Assembler::arith_op); 7478 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7479 ins_pipe(ialu_zero_reg); 7480 %} 7481 7482 // Multiplication Instructions 7483 // Integer Multiplication 7484 // Register Multiplication 7485 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7486 match(Set dst (MulI src1 src2)); 7487 7488 size(4); 7489 format %{ "MULX $src1,$src2,$dst" %} 7490 opcode(Assembler::mulx_op3, Assembler::arith_op); 7491 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7492 ins_pipe(imul_reg_reg); 7493 %} 7494 7495 // Immediate Multiplication 7496 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7497 match(Set dst (MulI src1 src2)); 7498 7499 size(4); 7500 format %{ "MULX $src1,$src2,$dst" %} 7501 opcode(Assembler::mulx_op3, Assembler::arith_op); 7502 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7503 ins_pipe(imul_reg_imm); 7504 %} 7505 7506 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7507 match(Set dst (MulL src1 src2)); 7508 ins_cost(DEFAULT_COST * 5); 7509 size(4); 7510 format %{ "MULX $src1,$src2,$dst\t! long" %} 7511 opcode(Assembler::mulx_op3, Assembler::arith_op); 7512 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7513 ins_pipe(mulL_reg_reg); 7514 %} 7515 7516 // Immediate Multiplication 7517 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7518 match(Set dst (MulL src1 src2)); 7519 ins_cost(DEFAULT_COST * 5); 7520 size(4); 7521 format %{ "MULX $src1,$src2,$dst" %} 7522 opcode(Assembler::mulx_op3, Assembler::arith_op); 7523 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7524 ins_pipe(mulL_reg_imm); 7525 %} 7526 7527 // Integer Division 7528 // Register Division 7529 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{ 7530 match(Set dst (DivI src1 src2)); 7531 ins_cost((2+71)*DEFAULT_COST); 7532 7533 format %{ "SRA $src2,0,$src2\n\t" 7534 "SRA $src1,0,$src1\n\t" 7535 "SDIVX $src1,$src2,$dst" %} 7536 ins_encode( idiv_reg( src1, src2, dst ) ); 7537 ins_pipe(sdiv_reg_reg); 7538 %} 7539 7540 // Immediate Division 7541 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{ 7542 match(Set dst (DivI src1 src2)); 7543 ins_cost((2+71)*DEFAULT_COST); 7544 7545 format %{ "SRA $src1,0,$src1\n\t" 7546 "SDIVX $src1,$src2,$dst" %} 7547 ins_encode( idiv_imm( src1, src2, dst ) ); 7548 ins_pipe(sdiv_reg_imm); 7549 %} 7550 7551 //----------Div-By-10-Expansion------------------------------------------------ 7552 // Extract hi bits of a 32x32->64 bit multiply. 7553 // Expand rule only, not matched 7554 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{ 7555 effect( DEF dst, USE src1, USE src2 ); 7556 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t" 7557 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %} 7558 ins_encode( enc_mul_hi(dst,src1,src2)); 7559 ins_pipe(sdiv_reg_reg); 7560 %} 7561 7562 // Magic constant, reciprocal of 10 7563 instruct loadConI_x66666667(iRegIsafe dst) %{ 7564 effect( DEF dst ); 7565 7566 size(8); 7567 format %{ "SET 0x66666667,$dst\t! Used in div-by-10" %} 7568 ins_encode( Set32(0x66666667, dst) ); 7569 ins_pipe(ialu_hi_lo_reg); 7570 %} 7571 7572 // Register Shift Right Arithmetic Long by 32-63 7573 instruct sra_31( iRegI dst, iRegI src ) %{ 7574 effect( DEF dst, USE src ); 7575 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %} 7576 ins_encode( form3_rs1_rd_copysign_hi(src,dst) ); 7577 ins_pipe(ialu_reg_reg); 7578 %} 7579 7580 // Arithmetic Shift Right by 8-bit immediate 7581 instruct sra_reg_2( iRegI dst, iRegI src ) %{ 7582 effect( DEF dst, USE src ); 7583 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %} 7584 opcode(Assembler::sra_op3, Assembler::arith_op); 7585 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) ); 7586 ins_pipe(ialu_reg_imm); 7587 %} 7588 7589 // Integer DIV with 10 7590 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{ 7591 match(Set dst (DivI src div)); 7592 ins_cost((6+6)*DEFAULT_COST); 7593 expand %{ 7594 iRegIsafe tmp1; // Killed temps; 7595 iRegIsafe tmp2; // Killed temps; 7596 iRegI tmp3; // Killed temps; 7597 iRegI tmp4; // Killed temps; 7598 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1 7599 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2 7600 sra_31( tmp3, src ); // SRA src,31 -> tmp3 7601 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4 7602 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst 7603 %} 7604 %} 7605 7606 // Register Long Division 7607 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7608 match(Set dst (DivL src1 src2)); 7609 ins_cost(DEFAULT_COST*71); 7610 size(4); 7611 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7612 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7613 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7614 ins_pipe(divL_reg_reg); 7615 %} 7616 7617 // Register Long Division 7618 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7619 match(Set dst (DivL src1 src2)); 7620 ins_cost(DEFAULT_COST*71); 7621 size(4); 7622 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7623 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7624 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7625 ins_pipe(divL_reg_imm); 7626 %} 7627 7628 // Integer Remainder 7629 // Register Remainder 7630 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{ 7631 match(Set dst (ModI src1 src2)); 7632 effect( KILL ccr, KILL temp); 7633 7634 format %{ "SREM $src1,$src2,$dst" %} 7635 ins_encode( irem_reg(src1, src2, dst, temp) ); 7636 ins_pipe(sdiv_reg_reg); 7637 %} 7638 7639 // Immediate Remainder 7640 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{ 7641 match(Set dst (ModI src1 src2)); 7642 effect( KILL ccr, KILL temp); 7643 7644 format %{ "SREM $src1,$src2,$dst" %} 7645 ins_encode( irem_imm(src1, src2, dst, temp) ); 7646 ins_pipe(sdiv_reg_imm); 7647 %} 7648 7649 // Register Long Remainder 7650 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7651 effect(DEF dst, USE src1, USE src2); 7652 size(4); 7653 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7654 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7655 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7656 ins_pipe(divL_reg_reg); 7657 %} 7658 7659 // Register Long Division 7660 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7661 effect(DEF dst, USE src1, USE src2); 7662 size(4); 7663 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7664 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7665 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7666 ins_pipe(divL_reg_imm); 7667 %} 7668 7669 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7670 effect(DEF dst, USE src1, USE src2); 7671 size(4); 7672 format %{ "MULX $src1,$src2,$dst\t! long" %} 7673 opcode(Assembler::mulx_op3, Assembler::arith_op); 7674 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7675 ins_pipe(mulL_reg_reg); 7676 %} 7677 7678 // Immediate Multiplication 7679 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7680 effect(DEF dst, USE src1, USE src2); 7681 size(4); 7682 format %{ "MULX $src1,$src2,$dst" %} 7683 opcode(Assembler::mulx_op3, Assembler::arith_op); 7684 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7685 ins_pipe(mulL_reg_imm); 7686 %} 7687 7688 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7689 effect(DEF dst, USE src1, USE src2); 7690 size(4); 7691 format %{ "SUB $src1,$src2,$dst\t! long" %} 7692 opcode(Assembler::sub_op3, Assembler::arith_op); 7693 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7694 ins_pipe(ialu_reg_reg); 7695 %} 7696 7697 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 7698 effect(DEF dst, USE src1, USE src2); 7699 size(4); 7700 format %{ "SUB $src1,$src2,$dst\t! long" %} 7701 opcode(Assembler::sub_op3, Assembler::arith_op); 7702 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7703 ins_pipe(ialu_reg_reg); 7704 %} 7705 7706 // Register Long Remainder 7707 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7708 match(Set dst (ModL src1 src2)); 7709 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7710 expand %{ 7711 iRegL tmp1; 7712 iRegL tmp2; 7713 divL_reg_reg_1(tmp1, src1, src2); 7714 mulL_reg_reg_1(tmp2, tmp1, src2); 7715 subL_reg_reg_1(dst, src1, tmp2); 7716 %} 7717 %} 7718 7719 // Register Long Remainder 7720 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7721 match(Set dst (ModL src1 src2)); 7722 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7723 expand %{ 7724 iRegL tmp1; 7725 iRegL tmp2; 7726 divL_reg_imm13_1(tmp1, src1, src2); 7727 mulL_reg_imm13_1(tmp2, tmp1, src2); 7728 subL_reg_reg_2 (dst, src1, tmp2); 7729 %} 7730 %} 7731 7732 // Integer Shift Instructions 7733 // Register Shift Left 7734 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7735 match(Set dst (LShiftI src1 src2)); 7736 7737 size(4); 7738 format %{ "SLL $src1,$src2,$dst" %} 7739 opcode(Assembler::sll_op3, Assembler::arith_op); 7740 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7741 ins_pipe(ialu_reg_reg); 7742 %} 7743 7744 // Register Shift Left Immediate 7745 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7746 match(Set dst (LShiftI src1 src2)); 7747 7748 size(4); 7749 format %{ "SLL $src1,$src2,$dst" %} 7750 opcode(Assembler::sll_op3, Assembler::arith_op); 7751 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7752 ins_pipe(ialu_reg_imm); 7753 %} 7754 7755 // Register Shift Left 7756 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7757 match(Set dst (LShiftL src1 src2)); 7758 7759 size(4); 7760 format %{ "SLLX $src1,$src2,$dst" %} 7761 opcode(Assembler::sllx_op3, Assembler::arith_op); 7762 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7763 ins_pipe(ialu_reg_reg); 7764 %} 7765 7766 // Register Shift Left Immediate 7767 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7768 match(Set dst (LShiftL src1 src2)); 7769 7770 size(4); 7771 format %{ "SLLX $src1,$src2,$dst" %} 7772 opcode(Assembler::sllx_op3, Assembler::arith_op); 7773 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7774 ins_pipe(ialu_reg_imm); 7775 %} 7776 7777 // Register Arithmetic Shift Right 7778 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7779 match(Set dst (RShiftI src1 src2)); 7780 size(4); 7781 format %{ "SRA $src1,$src2,$dst" %} 7782 opcode(Assembler::sra_op3, Assembler::arith_op); 7783 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7784 ins_pipe(ialu_reg_reg); 7785 %} 7786 7787 // Register Arithmetic Shift Right Immediate 7788 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7789 match(Set dst (RShiftI src1 src2)); 7790 7791 size(4); 7792 format %{ "SRA $src1,$src2,$dst" %} 7793 opcode(Assembler::sra_op3, Assembler::arith_op); 7794 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7795 ins_pipe(ialu_reg_imm); 7796 %} 7797 7798 // Register Shift Right Arithmatic Long 7799 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7800 match(Set dst (RShiftL src1 src2)); 7801 7802 size(4); 7803 format %{ "SRAX $src1,$src2,$dst" %} 7804 opcode(Assembler::srax_op3, Assembler::arith_op); 7805 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7806 ins_pipe(ialu_reg_reg); 7807 %} 7808 7809 // Register Shift Left Immediate 7810 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7811 match(Set dst (RShiftL src1 src2)); 7812 7813 size(4); 7814 format %{ "SRAX $src1,$src2,$dst" %} 7815 opcode(Assembler::srax_op3, Assembler::arith_op); 7816 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7817 ins_pipe(ialu_reg_imm); 7818 %} 7819 7820 // Register Shift Right 7821 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7822 match(Set dst (URShiftI src1 src2)); 7823 7824 size(4); 7825 format %{ "SRL $src1,$src2,$dst" %} 7826 opcode(Assembler::srl_op3, Assembler::arith_op); 7827 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7828 ins_pipe(ialu_reg_reg); 7829 %} 7830 7831 // Register Shift Right Immediate 7832 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7833 match(Set dst (URShiftI src1 src2)); 7834 7835 size(4); 7836 format %{ "SRL $src1,$src2,$dst" %} 7837 opcode(Assembler::srl_op3, Assembler::arith_op); 7838 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7839 ins_pipe(ialu_reg_imm); 7840 %} 7841 7842 // Register Shift Right 7843 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7844 match(Set dst (URShiftL src1 src2)); 7845 7846 size(4); 7847 format %{ "SRLX $src1,$src2,$dst" %} 7848 opcode(Assembler::srlx_op3, Assembler::arith_op); 7849 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7850 ins_pipe(ialu_reg_reg); 7851 %} 7852 7853 // Register Shift Right Immediate 7854 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7855 match(Set dst (URShiftL src1 src2)); 7856 7857 size(4); 7858 format %{ "SRLX $src1,$src2,$dst" %} 7859 opcode(Assembler::srlx_op3, Assembler::arith_op); 7860 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7861 ins_pipe(ialu_reg_imm); 7862 %} 7863 7864 // Register Shift Right Immediate with a CastP2X 7865 #ifdef _LP64 7866 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ 7867 match(Set dst (URShiftL (CastP2X src1) src2)); 7868 size(4); 7869 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %} 7870 opcode(Assembler::srlx_op3, Assembler::arith_op); 7871 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7872 ins_pipe(ialu_reg_imm); 7873 %} 7874 #else 7875 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{ 7876 match(Set dst (URShiftI (CastP2X src1) src2)); 7877 size(4); 7878 format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %} 7879 opcode(Assembler::srl_op3, Assembler::arith_op); 7880 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7881 ins_pipe(ialu_reg_imm); 7882 %} 7883 #endif 7884 7885 7886 //----------Floating Point Arithmetic Instructions----------------------------- 7887 7888 // Add float single precision 7889 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 7890 match(Set dst (AddF src1 src2)); 7891 7892 size(4); 7893 format %{ "FADDS $src1,$src2,$dst" %} 7894 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf); 7895 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7896 ins_pipe(faddF_reg_reg); 7897 %} 7898 7899 // Add float double precision 7900 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 7901 match(Set dst (AddD src1 src2)); 7902 7903 size(4); 7904 format %{ "FADDD $src1,$src2,$dst" %} 7905 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 7906 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7907 ins_pipe(faddD_reg_reg); 7908 %} 7909 7910 // Sub float single precision 7911 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 7912 match(Set dst (SubF src1 src2)); 7913 7914 size(4); 7915 format %{ "FSUBS $src1,$src2,$dst" %} 7916 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf); 7917 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7918 ins_pipe(faddF_reg_reg); 7919 %} 7920 7921 // Sub float double precision 7922 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 7923 match(Set dst (SubD src1 src2)); 7924 7925 size(4); 7926 format %{ "FSUBD $src1,$src2,$dst" %} 7927 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 7928 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7929 ins_pipe(faddD_reg_reg); 7930 %} 7931 7932 // Mul float single precision 7933 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 7934 match(Set dst (MulF src1 src2)); 7935 7936 size(4); 7937 format %{ "FMULS $src1,$src2,$dst" %} 7938 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf); 7939 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7940 ins_pipe(fmulF_reg_reg); 7941 %} 7942 7943 // Mul float double precision 7944 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 7945 match(Set dst (MulD src1 src2)); 7946 7947 size(4); 7948 format %{ "FMULD $src1,$src2,$dst" %} 7949 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 7950 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7951 ins_pipe(fmulD_reg_reg); 7952 %} 7953 7954 // Div float single precision 7955 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 7956 match(Set dst (DivF src1 src2)); 7957 7958 size(4); 7959 format %{ "FDIVS $src1,$src2,$dst" %} 7960 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf); 7961 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7962 ins_pipe(fdivF_reg_reg); 7963 %} 7964 7965 // Div float double precision 7966 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 7967 match(Set dst (DivD src1 src2)); 7968 7969 size(4); 7970 format %{ "FDIVD $src1,$src2,$dst" %} 7971 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf); 7972 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7973 ins_pipe(fdivD_reg_reg); 7974 %} 7975 7976 // Absolute float double precision 7977 instruct absD_reg(regD dst, regD src) %{ 7978 match(Set dst (AbsD src)); 7979 7980 format %{ "FABSd $src,$dst" %} 7981 ins_encode(fabsd(dst, src)); 7982 ins_pipe(faddD_reg); 7983 %} 7984 7985 // Absolute float single precision 7986 instruct absF_reg(regF dst, regF src) %{ 7987 match(Set dst (AbsF src)); 7988 7989 format %{ "FABSs $src,$dst" %} 7990 ins_encode(fabss(dst, src)); 7991 ins_pipe(faddF_reg); 7992 %} 7993 7994 instruct negF_reg(regF dst, regF src) %{ 7995 match(Set dst (NegF src)); 7996 7997 size(4); 7998 format %{ "FNEGs $src,$dst" %} 7999 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); 8000 ins_encode(form3_opf_rs2F_rdF(src, dst)); 8001 ins_pipe(faddF_reg); 8002 %} 8003 8004 instruct negD_reg(regD dst, regD src) %{ 8005 match(Set dst (NegD src)); 8006 8007 format %{ "FNEGd $src,$dst" %} 8008 ins_encode(fnegd(dst, src)); 8009 ins_pipe(faddD_reg); 8010 %} 8011 8012 // Sqrt float double precision 8013 instruct sqrtF_reg_reg(regF dst, regF src) %{ 8014 match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); 8015 8016 size(4); 8017 format %{ "FSQRTS $src,$dst" %} 8018 ins_encode(fsqrts(dst, src)); 8019 ins_pipe(fdivF_reg_reg); 8020 %} 8021 8022 // Sqrt float double precision 8023 instruct sqrtD_reg_reg(regD dst, regD src) %{ 8024 match(Set dst (SqrtD src)); 8025 8026 size(4); 8027 format %{ "FSQRTD $src,$dst" %} 8028 ins_encode(fsqrtd(dst, src)); 8029 ins_pipe(fdivD_reg_reg); 8030 %} 8031 8032 //----------Logical Instructions----------------------------------------------- 8033 // And Instructions 8034 // Register And 8035 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8036 match(Set dst (AndI src1 src2)); 8037 8038 size(4); 8039 format %{ "AND $src1,$src2,$dst" %} 8040 opcode(Assembler::and_op3, Assembler::arith_op); 8041 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8042 ins_pipe(ialu_reg_reg); 8043 %} 8044 8045 // Immediate And 8046 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8047 match(Set dst (AndI src1 src2)); 8048 8049 size(4); 8050 format %{ "AND $src1,$src2,$dst" %} 8051 opcode(Assembler::and_op3, Assembler::arith_op); 8052 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8053 ins_pipe(ialu_reg_imm); 8054 %} 8055 8056 // Register And Long 8057 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8058 match(Set dst (AndL src1 src2)); 8059 8060 ins_cost(DEFAULT_COST); 8061 size(4); 8062 format %{ "AND $src1,$src2,$dst\t! long" %} 8063 opcode(Assembler::and_op3, Assembler::arith_op); 8064 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8065 ins_pipe(ialu_reg_reg); 8066 %} 8067 8068 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8069 match(Set dst (AndL src1 con)); 8070 8071 ins_cost(DEFAULT_COST); 8072 size(4); 8073 format %{ "AND $src1,$con,$dst\t! long" %} 8074 opcode(Assembler::and_op3, Assembler::arith_op); 8075 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8076 ins_pipe(ialu_reg_imm); 8077 %} 8078 8079 // Or Instructions 8080 // Register Or 8081 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8082 match(Set dst (OrI src1 src2)); 8083 8084 size(4); 8085 format %{ "OR $src1,$src2,$dst" %} 8086 opcode(Assembler::or_op3, Assembler::arith_op); 8087 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8088 ins_pipe(ialu_reg_reg); 8089 %} 8090 8091 // Immediate Or 8092 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8093 match(Set dst (OrI src1 src2)); 8094 8095 size(4); 8096 format %{ "OR $src1,$src2,$dst" %} 8097 opcode(Assembler::or_op3, Assembler::arith_op); 8098 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8099 ins_pipe(ialu_reg_imm); 8100 %} 8101 8102 // Register Or Long 8103 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8104 match(Set dst (OrL src1 src2)); 8105 8106 ins_cost(DEFAULT_COST); 8107 size(4); 8108 format %{ "OR $src1,$src2,$dst\t! long" %} 8109 opcode(Assembler::or_op3, Assembler::arith_op); 8110 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8111 ins_pipe(ialu_reg_reg); 8112 %} 8113 8114 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8115 match(Set dst (OrL src1 con)); 8116 ins_cost(DEFAULT_COST*2); 8117 8118 ins_cost(DEFAULT_COST); 8119 size(4); 8120 format %{ "OR $src1,$con,$dst\t! long" %} 8121 opcode(Assembler::or_op3, Assembler::arith_op); 8122 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8123 ins_pipe(ialu_reg_imm); 8124 %} 8125 8126 #ifndef _LP64 8127 8128 // Use sp_ptr_RegP to match G2 (TLS register) without spilling. 8129 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{ 8130 match(Set dst (OrI src1 (CastP2X src2))); 8131 8132 size(4); 8133 format %{ "OR $src1,$src2,$dst" %} 8134 opcode(Assembler::or_op3, Assembler::arith_op); 8135 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8136 ins_pipe(ialu_reg_reg); 8137 %} 8138 8139 #else 8140 8141 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ 8142 match(Set dst (OrL src1 (CastP2X src2))); 8143 8144 ins_cost(DEFAULT_COST); 8145 size(4); 8146 format %{ "OR $src1,$src2,$dst\t! long" %} 8147 opcode(Assembler::or_op3, Assembler::arith_op); 8148 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8149 ins_pipe(ialu_reg_reg); 8150 %} 8151 8152 #endif 8153 8154 // Xor Instructions 8155 // Register Xor 8156 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8157 match(Set dst (XorI src1 src2)); 8158 8159 size(4); 8160 format %{ "XOR $src1,$src2,$dst" %} 8161 opcode(Assembler::xor_op3, Assembler::arith_op); 8162 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8163 ins_pipe(ialu_reg_reg); 8164 %} 8165 8166 // Immediate Xor 8167 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8168 match(Set dst (XorI src1 src2)); 8169 8170 size(4); 8171 format %{ "XOR $src1,$src2,$dst" %} 8172 opcode(Assembler::xor_op3, Assembler::arith_op); 8173 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8174 ins_pipe(ialu_reg_imm); 8175 %} 8176 8177 // Register Xor Long 8178 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8179 match(Set dst (XorL src1 src2)); 8180 8181 ins_cost(DEFAULT_COST); 8182 size(4); 8183 format %{ "XOR $src1,$src2,$dst\t! long" %} 8184 opcode(Assembler::xor_op3, Assembler::arith_op); 8185 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8186 ins_pipe(ialu_reg_reg); 8187 %} 8188 8189 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8190 match(Set dst (XorL src1 con)); 8191 8192 ins_cost(DEFAULT_COST); 8193 size(4); 8194 format %{ "XOR $src1,$con,$dst\t! long" %} 8195 opcode(Assembler::xor_op3, Assembler::arith_op); 8196 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8197 ins_pipe(ialu_reg_imm); 8198 %} 8199 8200 //----------Convert to Boolean------------------------------------------------- 8201 // Nice hack for 32-bit tests but doesn't work for 8202 // 64-bit pointers. 8203 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{ 8204 match(Set dst (Conv2B src)); 8205 effect( KILL ccr ); 8206 ins_cost(DEFAULT_COST*2); 8207 format %{ "CMP R_G0,$src\n\t" 8208 "ADDX R_G0,0,$dst" %} 8209 ins_encode( enc_to_bool( src, dst ) ); 8210 ins_pipe(ialu_reg_ialu); 8211 %} 8212 8213 #ifndef _LP64 8214 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{ 8215 match(Set dst (Conv2B src)); 8216 effect( KILL ccr ); 8217 ins_cost(DEFAULT_COST*2); 8218 format %{ "CMP R_G0,$src\n\t" 8219 "ADDX R_G0,0,$dst" %} 8220 ins_encode( enc_to_bool( src, dst ) ); 8221 ins_pipe(ialu_reg_ialu); 8222 %} 8223 #else 8224 instruct convP2B( iRegI dst, iRegP src ) %{ 8225 match(Set dst (Conv2B src)); 8226 ins_cost(DEFAULT_COST*2); 8227 format %{ "MOV $src,$dst\n\t" 8228 "MOVRNZ $src,1,$dst" %} 8229 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); 8230 ins_pipe(ialu_clr_and_mover); 8231 %} 8232 #endif 8233 8234 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ 8235 match(Set dst (CmpLTMask src zero)); 8236 effect(KILL ccr); 8237 size(4); 8238 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %} 8239 ins_encode %{ 8240 __ sra($src$$Register, 31, $dst$$Register); 8241 %} 8242 ins_pipe(ialu_reg_imm); 8243 %} 8244 8245 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{ 8246 match(Set dst (CmpLTMask p q)); 8247 effect( KILL ccr ); 8248 ins_cost(DEFAULT_COST*4); 8249 format %{ "CMP $p,$q\n\t" 8250 "MOV #0,$dst\n\t" 8251 "BLT,a .+8\n\t" 8252 "MOV #-1,$dst" %} 8253 ins_encode( enc_ltmask(p,q,dst) ); 8254 ins_pipe(ialu_reg_reg_ialu); 8255 %} 8256 8257 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ 8258 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 8259 effect(KILL ccr, TEMP tmp); 8260 ins_cost(DEFAULT_COST*3); 8261 8262 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" 8263 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" 8264 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} 8265 ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) ); 8266 ins_pipe( cadd_cmpltmask ); 8267 %} 8268 8269 8270 //----------------------------------------------------------------- 8271 // Direct raw moves between float and general registers using VIS3. 8272 8273 // ins_pipe(faddF_reg); 8274 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{ 8275 predicate(UseVIS >= 3); 8276 match(Set dst (MoveF2I src)); 8277 8278 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %} 8279 ins_encode %{ 8280 __ movstouw($src$$FloatRegister, $dst$$Register); 8281 %} 8282 ins_pipe(ialu_reg_reg); 8283 %} 8284 8285 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{ 8286 predicate(UseVIS >= 3); 8287 match(Set dst (MoveI2F src)); 8288 8289 format %{ "MOVWTOS $src,$dst\t! MoveI2F" %} 8290 ins_encode %{ 8291 __ movwtos($src$$Register, $dst$$FloatRegister); 8292 %} 8293 ins_pipe(ialu_reg_reg); 8294 %} 8295 8296 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{ 8297 predicate(UseVIS >= 3); 8298 match(Set dst (MoveD2L src)); 8299 8300 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %} 8301 ins_encode %{ 8302 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register); 8303 %} 8304 ins_pipe(ialu_reg_reg); 8305 %} 8306 8307 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{ 8308 predicate(UseVIS >= 3); 8309 match(Set dst (MoveL2D src)); 8310 8311 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %} 8312 ins_encode %{ 8313 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg)); 8314 %} 8315 ins_pipe(ialu_reg_reg); 8316 %} 8317 8318 8319 // Raw moves between float and general registers using stack. 8320 8321 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ 8322 match(Set dst (MoveF2I src)); 8323 effect(DEF dst, USE src); 8324 ins_cost(MEMORY_REF_COST); 8325 8326 size(4); 8327 format %{ "LDUW $src,$dst\t! MoveF2I" %} 8328 opcode(Assembler::lduw_op3); 8329 ins_encode(simple_form3_mem_reg( src, dst ) ); 8330 ins_pipe(iload_mem); 8331 %} 8332 8333 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 8334 match(Set dst (MoveI2F src)); 8335 effect(DEF dst, USE src); 8336 ins_cost(MEMORY_REF_COST); 8337 8338 size(4); 8339 format %{ "LDF $src,$dst\t! MoveI2F" %} 8340 opcode(Assembler::ldf_op3); 8341 ins_encode(simple_form3_mem_reg(src, dst)); 8342 ins_pipe(floadF_stk); 8343 %} 8344 8345 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{ 8346 match(Set dst (MoveD2L src)); 8347 effect(DEF dst, USE src); 8348 ins_cost(MEMORY_REF_COST); 8349 8350 size(4); 8351 format %{ "LDX $src,$dst\t! MoveD2L" %} 8352 opcode(Assembler::ldx_op3); 8353 ins_encode(simple_form3_mem_reg( src, dst ) ); 8354 ins_pipe(iload_mem); 8355 %} 8356 8357 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 8358 match(Set dst (MoveL2D src)); 8359 effect(DEF dst, USE src); 8360 ins_cost(MEMORY_REF_COST); 8361 8362 size(4); 8363 format %{ "LDDF $src,$dst\t! MoveL2D" %} 8364 opcode(Assembler::lddf_op3); 8365 ins_encode(simple_form3_mem_reg(src, dst)); 8366 ins_pipe(floadD_stk); 8367 %} 8368 8369 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 8370 match(Set dst (MoveF2I src)); 8371 effect(DEF dst, USE src); 8372 ins_cost(MEMORY_REF_COST); 8373 8374 size(4); 8375 format %{ "STF $src,$dst\t! MoveF2I" %} 8376 opcode(Assembler::stf_op3); 8377 ins_encode(simple_form3_mem_reg(dst, src)); 8378 ins_pipe(fstoreF_stk_reg); 8379 %} 8380 8381 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ 8382 match(Set dst (MoveI2F src)); 8383 effect(DEF dst, USE src); 8384 ins_cost(MEMORY_REF_COST); 8385 8386 size(4); 8387 format %{ "STW $src,$dst\t! MoveI2F" %} 8388 opcode(Assembler::stw_op3); 8389 ins_encode(simple_form3_mem_reg( dst, src ) ); 8390 ins_pipe(istore_mem_reg); 8391 %} 8392 8393 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 8394 match(Set dst (MoveD2L src)); 8395 effect(DEF dst, USE src); 8396 ins_cost(MEMORY_REF_COST); 8397 8398 size(4); 8399 format %{ "STDF $src,$dst\t! MoveD2L" %} 8400 opcode(Assembler::stdf_op3); 8401 ins_encode(simple_form3_mem_reg(dst, src)); 8402 ins_pipe(fstoreD_stk_reg); 8403 %} 8404 8405 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ 8406 match(Set dst (MoveL2D src)); 8407 effect(DEF dst, USE src); 8408 ins_cost(MEMORY_REF_COST); 8409 8410 size(4); 8411 format %{ "STX $src,$dst\t! MoveL2D" %} 8412 opcode(Assembler::stx_op3); 8413 ins_encode(simple_form3_mem_reg( dst, src ) ); 8414 ins_pipe(istore_mem_reg); 8415 %} 8416 8417 8418 //----------Arithmetic Conversion Instructions--------------------------------- 8419 // The conversions operations are all Alpha sorted. Please keep it that way! 8420 8421 instruct convD2F_reg(regF dst, regD src) %{ 8422 match(Set dst (ConvD2F src)); 8423 size(4); 8424 format %{ "FDTOS $src,$dst" %} 8425 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); 8426 ins_encode(form3_opf_rs2D_rdF(src, dst)); 8427 ins_pipe(fcvtD2F); 8428 %} 8429 8430 8431 // Convert a double to an int in a float register. 8432 // If the double is a NAN, stuff a zero in instead. 8433 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ 8434 effect(DEF dst, USE src, KILL fcc0); 8435 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8436 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8437 "FDTOI $src,$dst\t! convert in delay slot\n\t" 8438 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8439 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8440 "skip:" %} 8441 ins_encode(form_d2i_helper(src,dst)); 8442 ins_pipe(fcvtD2I); 8443 %} 8444 8445 instruct convD2I_stk(stackSlotI dst, regD src) %{ 8446 match(Set dst (ConvD2I src)); 8447 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8448 expand %{ 8449 regF tmp; 8450 convD2I_helper(tmp, src); 8451 regF_to_stkI(dst, tmp); 8452 %} 8453 %} 8454 8455 instruct convD2I_reg(iRegI dst, regD src) %{ 8456 predicate(UseVIS >= 3); 8457 match(Set dst (ConvD2I src)); 8458 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8459 expand %{ 8460 regF tmp; 8461 convD2I_helper(tmp, src); 8462 MoveF2I_reg_reg(dst, tmp); 8463 %} 8464 %} 8465 8466 8467 // Convert a double to a long in a double register. 8468 // If the double is a NAN, stuff a zero in instead. 8469 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{ 8470 effect(DEF dst, USE src, KILL fcc0); 8471 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8472 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8473 "FDTOX $src,$dst\t! convert in delay slot\n\t" 8474 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8475 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" 8476 "skip:" %} 8477 ins_encode(form_d2l_helper(src,dst)); 8478 ins_pipe(fcvtD2L); 8479 %} 8480 8481 instruct convD2L_stk(stackSlotL dst, regD src) %{ 8482 match(Set dst (ConvD2L src)); 8483 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8484 expand %{ 8485 regD tmp; 8486 convD2L_helper(tmp, src); 8487 regD_to_stkL(dst, tmp); 8488 %} 8489 %} 8490 8491 instruct convD2L_reg(iRegL dst, regD src) %{ 8492 predicate(UseVIS >= 3); 8493 match(Set dst (ConvD2L src)); 8494 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8495 expand %{ 8496 regD tmp; 8497 convD2L_helper(tmp, src); 8498 MoveD2L_reg_reg(dst, tmp); 8499 %} 8500 %} 8501 8502 8503 instruct convF2D_reg(regD dst, regF src) %{ 8504 match(Set dst (ConvF2D src)); 8505 format %{ "FSTOD $src,$dst" %} 8506 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf); 8507 ins_encode(form3_opf_rs2F_rdD(src, dst)); 8508 ins_pipe(fcvtF2D); 8509 %} 8510 8511 8512 // Convert a float to an int in a float register. 8513 // If the float is a NAN, stuff a zero in instead. 8514 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{ 8515 effect(DEF dst, USE src, KILL fcc0); 8516 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8517 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8518 "FSTOI $src,$dst\t! convert in delay slot\n\t" 8519 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8520 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8521 "skip:" %} 8522 ins_encode(form_f2i_helper(src,dst)); 8523 ins_pipe(fcvtF2I); 8524 %} 8525 8526 instruct convF2I_stk(stackSlotI dst, regF src) %{ 8527 match(Set dst (ConvF2I src)); 8528 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8529 expand %{ 8530 regF tmp; 8531 convF2I_helper(tmp, src); 8532 regF_to_stkI(dst, tmp); 8533 %} 8534 %} 8535 8536 instruct convF2I_reg(iRegI dst, regF src) %{ 8537 predicate(UseVIS >= 3); 8538 match(Set dst (ConvF2I src)); 8539 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8540 expand %{ 8541 regF tmp; 8542 convF2I_helper(tmp, src); 8543 MoveF2I_reg_reg(dst, tmp); 8544 %} 8545 %} 8546 8547 8548 // Convert a float to a long in a float register. 8549 // If the float is a NAN, stuff a zero in instead. 8550 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{ 8551 effect(DEF dst, USE src, KILL fcc0); 8552 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8553 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8554 "FSTOX $src,$dst\t! convert in delay slot\n\t" 8555 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8556 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" 8557 "skip:" %} 8558 ins_encode(form_f2l_helper(src,dst)); 8559 ins_pipe(fcvtF2L); 8560 %} 8561 8562 instruct convF2L_stk(stackSlotL dst, regF src) %{ 8563 match(Set dst (ConvF2L src)); 8564 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8565 expand %{ 8566 regD tmp; 8567 convF2L_helper(tmp, src); 8568 regD_to_stkL(dst, tmp); 8569 %} 8570 %} 8571 8572 instruct convF2L_reg(iRegL dst, regF src) %{ 8573 predicate(UseVIS >= 3); 8574 match(Set dst (ConvF2L src)); 8575 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8576 expand %{ 8577 regD tmp; 8578 convF2L_helper(tmp, src); 8579 MoveD2L_reg_reg(dst, tmp); 8580 %} 8581 %} 8582 8583 8584 instruct convI2D_helper(regD dst, regF tmp) %{ 8585 effect(USE tmp, DEF dst); 8586 format %{ "FITOD $tmp,$dst" %} 8587 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8588 ins_encode(form3_opf_rs2F_rdD(tmp, dst)); 8589 ins_pipe(fcvtI2D); 8590 %} 8591 8592 instruct convI2D_stk(stackSlotI src, regD dst) %{ 8593 match(Set dst (ConvI2D src)); 8594 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8595 expand %{ 8596 regF tmp; 8597 stkI_to_regF(tmp, src); 8598 convI2D_helper(dst, tmp); 8599 %} 8600 %} 8601 8602 instruct convI2D_reg(regD_low dst, iRegI src) %{ 8603 predicate(UseVIS >= 3); 8604 match(Set dst (ConvI2D src)); 8605 expand %{ 8606 regF tmp; 8607 MoveI2F_reg_reg(tmp, src); 8608 convI2D_helper(dst, tmp); 8609 %} 8610 %} 8611 8612 instruct convI2D_mem(regD_low dst, memory mem) %{ 8613 match(Set dst (ConvI2D (LoadI mem))); 8614 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8615 size(8); 8616 format %{ "LDF $mem,$dst\n\t" 8617 "FITOD $dst,$dst" %} 8618 opcode(Assembler::ldf_op3, Assembler::fitod_opf); 8619 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8620 ins_pipe(floadF_mem); 8621 %} 8622 8623 8624 instruct convI2F_helper(regF dst, regF tmp) %{ 8625 effect(DEF dst, USE tmp); 8626 format %{ "FITOS $tmp,$dst" %} 8627 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf); 8628 ins_encode(form3_opf_rs2F_rdF(tmp, dst)); 8629 ins_pipe(fcvtI2F); 8630 %} 8631 8632 instruct convI2F_stk(regF dst, stackSlotI src) %{ 8633 match(Set dst (ConvI2F src)); 8634 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8635 expand %{ 8636 regF tmp; 8637 stkI_to_regF(tmp,src); 8638 convI2F_helper(dst, tmp); 8639 %} 8640 %} 8641 8642 instruct convI2F_reg(regF dst, iRegI src) %{ 8643 predicate(UseVIS >= 3); 8644 match(Set dst (ConvI2F src)); 8645 ins_cost(DEFAULT_COST); 8646 expand %{ 8647 regF tmp; 8648 MoveI2F_reg_reg(tmp, src); 8649 convI2F_helper(dst, tmp); 8650 %} 8651 %} 8652 8653 instruct convI2F_mem( regF dst, memory mem ) %{ 8654 match(Set dst (ConvI2F (LoadI mem))); 8655 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8656 size(8); 8657 format %{ "LDF $mem,$dst\n\t" 8658 "FITOS $dst,$dst" %} 8659 opcode(Assembler::ldf_op3, Assembler::fitos_opf); 8660 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8661 ins_pipe(floadF_mem); 8662 %} 8663 8664 8665 instruct convI2L_reg(iRegL dst, iRegI src) %{ 8666 match(Set dst (ConvI2L src)); 8667 size(4); 8668 format %{ "SRA $src,0,$dst\t! int->long" %} 8669 opcode(Assembler::sra_op3, Assembler::arith_op); 8670 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8671 ins_pipe(ialu_reg_reg); 8672 %} 8673 8674 // Zero-extend convert int to long 8675 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{ 8676 match(Set dst (AndL (ConvI2L src) mask) ); 8677 size(4); 8678 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %} 8679 opcode(Assembler::srl_op3, Assembler::arith_op); 8680 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8681 ins_pipe(ialu_reg_reg); 8682 %} 8683 8684 // Zero-extend long 8685 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{ 8686 match(Set dst (AndL src mask) ); 8687 size(4); 8688 format %{ "SRL $src,0,$dst\t! zero-extend long" %} 8689 opcode(Assembler::srl_op3, Assembler::arith_op); 8690 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8691 ins_pipe(ialu_reg_reg); 8692 %} 8693 8694 8695 //----------- 8696 // Long to Double conversion using V8 opcodes. 8697 // Still useful because cheetah traps and becomes 8698 // amazingly slow for some common numbers. 8699 8700 // Magic constant, 0x43300000 8701 instruct loadConI_x43300000(iRegI dst) %{ 8702 effect(DEF dst); 8703 size(4); 8704 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %} 8705 ins_encode(SetHi22(0x43300000, dst)); 8706 ins_pipe(ialu_none); 8707 %} 8708 8709 // Magic constant, 0x41f00000 8710 instruct loadConI_x41f00000(iRegI dst) %{ 8711 effect(DEF dst); 8712 size(4); 8713 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %} 8714 ins_encode(SetHi22(0x41f00000, dst)); 8715 ins_pipe(ialu_none); 8716 %} 8717 8718 // Construct a double from two float halves 8719 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{ 8720 effect(DEF dst, USE src1, USE src2); 8721 size(8); 8722 format %{ "FMOVS $src1.hi,$dst.hi\n\t" 8723 "FMOVS $src2.lo,$dst.lo" %} 8724 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf); 8725 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst)); 8726 ins_pipe(faddD_reg_reg); 8727 %} 8728 8729 // Convert integer in high half of a double register (in the lower half of 8730 // the double register file) to double 8731 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{ 8732 effect(DEF dst, USE src); 8733 size(4); 8734 format %{ "FITOD $src,$dst" %} 8735 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8736 ins_encode(form3_opf_rs2D_rdD(src, dst)); 8737 ins_pipe(fcvtLHi2D); 8738 %} 8739 8740 // Add float double precision 8741 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{ 8742 effect(DEF dst, USE src1, USE src2); 8743 size(4); 8744 format %{ "FADDD $src1,$src2,$dst" %} 8745 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 8746 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8747 ins_pipe(faddD_reg_reg); 8748 %} 8749 8750 // Sub float double precision 8751 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{ 8752 effect(DEF dst, USE src1, USE src2); 8753 size(4); 8754 format %{ "FSUBD $src1,$src2,$dst" %} 8755 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 8756 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8757 ins_pipe(faddD_reg_reg); 8758 %} 8759 8760 // Mul float double precision 8761 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{ 8762 effect(DEF dst, USE src1, USE src2); 8763 size(4); 8764 format %{ "FMULD $src1,$src2,$dst" %} 8765 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 8766 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8767 ins_pipe(fmulD_reg_reg); 8768 %} 8769 8770 instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{ 8771 match(Set dst (ConvL2D src)); 8772 ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6); 8773 8774 expand %{ 8775 regD_low tmpsrc; 8776 iRegI ix43300000; 8777 iRegI ix41f00000; 8778 stackSlotL lx43300000; 8779 stackSlotL lx41f00000; 8780 regD_low dx43300000; 8781 regD dx41f00000; 8782 regD tmp1; 8783 regD_low tmp2; 8784 regD tmp3; 8785 regD tmp4; 8786 8787 stkL_to_regD(tmpsrc, src); 8788 8789 loadConI_x43300000(ix43300000); 8790 loadConI_x41f00000(ix41f00000); 8791 regI_to_stkLHi(lx43300000, ix43300000); 8792 regI_to_stkLHi(lx41f00000, ix41f00000); 8793 stkL_to_regD(dx43300000, lx43300000); 8794 stkL_to_regD(dx41f00000, lx41f00000); 8795 8796 convI2D_regDHi_regD(tmp1, tmpsrc); 8797 regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc); 8798 subD_regD_regD(tmp3, tmp2, dx43300000); 8799 mulD_regD_regD(tmp4, tmp1, dx41f00000); 8800 addD_regD_regD(dst, tmp3, tmp4); 8801 %} 8802 %} 8803 8804 // Long to Double conversion using fast fxtof 8805 instruct convL2D_helper(regD dst, regD tmp) %{ 8806 effect(DEF dst, USE tmp); 8807 size(4); 8808 format %{ "FXTOD $tmp,$dst" %} 8809 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf); 8810 ins_encode(form3_opf_rs2D_rdD(tmp, dst)); 8811 ins_pipe(fcvtL2D); 8812 %} 8813 8814 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{ 8815 predicate(VM_Version::has_fast_fxtof()); 8816 match(Set dst (ConvL2D src)); 8817 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST); 8818 expand %{ 8819 regD tmp; 8820 stkL_to_regD(tmp, src); 8821 convL2D_helper(dst, tmp); 8822 %} 8823 %} 8824 8825 instruct convL2D_reg(regD dst, iRegL src) %{ 8826 predicate(UseVIS >= 3); 8827 match(Set dst (ConvL2D src)); 8828 expand %{ 8829 regD tmp; 8830 MoveL2D_reg_reg(tmp, src); 8831 convL2D_helper(dst, tmp); 8832 %} 8833 %} 8834 8835 // Long to Float conversion using fast fxtof 8836 instruct convL2F_helper(regF dst, regD tmp) %{ 8837 effect(DEF dst, USE tmp); 8838 size(4); 8839 format %{ "FXTOS $tmp,$dst" %} 8840 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf); 8841 ins_encode(form3_opf_rs2D_rdF(tmp, dst)); 8842 ins_pipe(fcvtL2F); 8843 %} 8844 8845 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{ 8846 match(Set dst (ConvL2F src)); 8847 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8848 expand %{ 8849 regD tmp; 8850 stkL_to_regD(tmp, src); 8851 convL2F_helper(dst, tmp); 8852 %} 8853 %} 8854 8855 instruct convL2F_reg(regF dst, iRegL src) %{ 8856 predicate(UseVIS >= 3); 8857 match(Set dst (ConvL2F src)); 8858 ins_cost(DEFAULT_COST); 8859 expand %{ 8860 regD tmp; 8861 MoveL2D_reg_reg(tmp, src); 8862 convL2F_helper(dst, tmp); 8863 %} 8864 %} 8865 8866 //----------- 8867 8868 instruct convL2I_reg(iRegI dst, iRegL src) %{ 8869 match(Set dst (ConvL2I src)); 8870 #ifndef _LP64 8871 format %{ "MOV $src.lo,$dst\t! long->int" %} 8872 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) ); 8873 ins_pipe(ialu_move_reg_I_to_L); 8874 #else 8875 size(4); 8876 format %{ "SRA $src,R_G0,$dst\t! long->int" %} 8877 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) ); 8878 ins_pipe(ialu_reg); 8879 #endif 8880 %} 8881 8882 // Register Shift Right Immediate 8883 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{ 8884 match(Set dst (ConvL2I (RShiftL src cnt))); 8885 8886 size(4); 8887 format %{ "SRAX $src,$cnt,$dst" %} 8888 opcode(Assembler::srax_op3, Assembler::arith_op); 8889 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) ); 8890 ins_pipe(ialu_reg_imm); 8891 %} 8892 8893 // Replicate scalar to packed byte values in Double register 8894 instruct Repl8B_reg_helper(iRegL dst, iRegI src) %{ 8895 effect(DEF dst, USE src); 8896 format %{ "SLLX $src,56,$dst\n\t" 8897 "SRLX $dst, 8,O7\n\t" 8898 "OR $dst,O7,$dst\n\t" 8899 "SRLX $dst,16,O7\n\t" 8900 "OR $dst,O7,$dst\n\t" 8901 "SRLX $dst,32,O7\n\t" 8902 "OR $dst,O7,$dst\t! replicate8B" %} 8903 ins_encode( enc_repl8b(src, dst)); 8904 ins_pipe(ialu_reg); 8905 %} 8906 8907 // Replicate scalar to packed byte values in Double register 8908 instruct Repl8B_reg(stackSlotD dst, iRegI src) %{ 8909 match(Set dst (Replicate8B src)); 8910 expand %{ 8911 iRegL tmp; 8912 Repl8B_reg_helper(tmp, src); 8913 regL_to_stkD(dst, tmp); 8914 %} 8915 %} 8916 8917 // Replicate scalar constant to packed byte values in Double register 8918 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{ 8919 match(Set dst (Replicate8B con)); 8920 effect(KILL tmp); 8921 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %} 8922 ins_encode %{ 8923 // XXX This is a quick fix for 6833573. 8924 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister); 8925 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register); 8926 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 8927 %} 8928 ins_pipe(loadConFD); 8929 %} 8930 8931 // Replicate scalar to packed char values into stack slot 8932 instruct Repl4C_reg_helper(iRegL dst, iRegI src) %{ 8933 effect(DEF dst, USE src); 8934 format %{ "SLLX $src,48,$dst\n\t" 8935 "SRLX $dst,16,O7\n\t" 8936 "OR $dst,O7,$dst\n\t" 8937 "SRLX $dst,32,O7\n\t" 8938 "OR $dst,O7,$dst\t! replicate4C" %} 8939 ins_encode( enc_repl4s(src, dst) ); 8940 ins_pipe(ialu_reg); 8941 %} 8942 8943 // Replicate scalar to packed char values into stack slot 8944 instruct Repl4C_reg(stackSlotD dst, iRegI src) %{ 8945 match(Set dst (Replicate4C src)); 8946 expand %{ 8947 iRegL tmp; 8948 Repl4C_reg_helper(tmp, src); 8949 regL_to_stkD(dst, tmp); 8950 %} 8951 %} 8952 8953 // Replicate scalar constant to packed char values in Double register 8954 instruct Repl4C_immI(regD dst, immI con, o7RegI tmp) %{ 8955 match(Set dst (Replicate4C con)); 8956 effect(KILL tmp); 8957 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4C($con)" %} 8958 ins_encode %{ 8959 // XXX This is a quick fix for 6833573. 8960 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister); 8961 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register); 8962 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 8963 %} 8964 ins_pipe(loadConFD); 8965 %} 8966 8967 // Replicate scalar to packed short values into stack slot 8968 instruct Repl4S_reg_helper(iRegL dst, iRegI src) %{ 8969 effect(DEF dst, USE src); 8970 format %{ "SLLX $src,48,$dst\n\t" 8971 "SRLX $dst,16,O7\n\t" 8972 "OR $dst,O7,$dst\n\t" 8973 "SRLX $dst,32,O7\n\t" 8974 "OR $dst,O7,$dst\t! replicate4S" %} 8975 ins_encode( enc_repl4s(src, dst) ); 8976 ins_pipe(ialu_reg); 8977 %} 8978 8979 // Replicate scalar to packed short values into stack slot 8980 instruct Repl4S_reg(stackSlotD dst, iRegI src) %{ 8981 match(Set dst (Replicate4S src)); 8982 expand %{ 8983 iRegL tmp; 8984 Repl4S_reg_helper(tmp, src); 8985 regL_to_stkD(dst, tmp); 8986 %} 8987 %} 8988 8989 // Replicate scalar constant to packed short values in Double register 8990 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{ 8991 match(Set dst (Replicate4S con)); 8992 effect(KILL tmp); 8993 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %} 8994 ins_encode %{ 8995 // XXX This is a quick fix for 6833573. 8996 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister); 8997 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register); 8998 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 8999 %} 9000 ins_pipe(loadConFD); 9001 %} 9002 9003 // Replicate scalar to packed int values in Double register 9004 instruct Repl2I_reg_helper(iRegL dst, iRegI src) %{ 9005 effect(DEF dst, USE src); 9006 format %{ "SLLX $src,32,$dst\n\t" 9007 "SRLX $dst,32,O7\n\t" 9008 "OR $dst,O7,$dst\t! replicate2I" %} 9009 ins_encode( enc_repl2i(src, dst)); 9010 ins_pipe(ialu_reg); 9011 %} 9012 9013 // Replicate scalar to packed int values in Double register 9014 instruct Repl2I_reg(stackSlotD dst, iRegI src) %{ 9015 match(Set dst (Replicate2I src)); 9016 expand %{ 9017 iRegL tmp; 9018 Repl2I_reg_helper(tmp, src); 9019 regL_to_stkD(dst, tmp); 9020 %} 9021 %} 9022 9023 // Replicate scalar zero constant to packed int values in Double register 9024 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{ 9025 match(Set dst (Replicate2I con)); 9026 effect(KILL tmp); 9027 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %} 9028 ins_encode %{ 9029 // XXX This is a quick fix for 6833573. 9030 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister); 9031 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register); 9032 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 9033 %} 9034 ins_pipe(loadConFD); 9035 %} 9036 9037 //----------Control Flow Instructions------------------------------------------ 9038 // Compare Instructions 9039 // Compare Integers 9040 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{ 9041 match(Set icc (CmpI op1 op2)); 9042 effect( DEF icc, USE op1, USE op2 ); 9043 9044 size(4); 9045 format %{ "CMP $op1,$op2" %} 9046 opcode(Assembler::subcc_op3, Assembler::arith_op); 9047 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9048 ins_pipe(ialu_cconly_reg_reg); 9049 %} 9050 9051 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{ 9052 match(Set icc (CmpU op1 op2)); 9053 9054 size(4); 9055 format %{ "CMP $op1,$op2\t! unsigned" %} 9056 opcode(Assembler::subcc_op3, Assembler::arith_op); 9057 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9058 ins_pipe(ialu_cconly_reg_reg); 9059 %} 9060 9061 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{ 9062 match(Set icc (CmpI op1 op2)); 9063 effect( DEF icc, USE op1 ); 9064 9065 size(4); 9066 format %{ "CMP $op1,$op2" %} 9067 opcode(Assembler::subcc_op3, Assembler::arith_op); 9068 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9069 ins_pipe(ialu_cconly_reg_imm); 9070 %} 9071 9072 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{ 9073 match(Set icc (CmpI (AndI op1 op2) zero)); 9074 9075 size(4); 9076 format %{ "BTST $op2,$op1" %} 9077 opcode(Assembler::andcc_op3, Assembler::arith_op); 9078 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9079 ins_pipe(ialu_cconly_reg_reg_zero); 9080 %} 9081 9082 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{ 9083 match(Set icc (CmpI (AndI op1 op2) zero)); 9084 9085 size(4); 9086 format %{ "BTST $op2,$op1" %} 9087 opcode(Assembler::andcc_op3, Assembler::arith_op); 9088 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9089 ins_pipe(ialu_cconly_reg_imm_zero); 9090 %} 9091 9092 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{ 9093 match(Set xcc (CmpL op1 op2)); 9094 effect( DEF xcc, USE op1, USE op2 ); 9095 9096 size(4); 9097 format %{ "CMP $op1,$op2\t\t! long" %} 9098 opcode(Assembler::subcc_op3, Assembler::arith_op); 9099 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9100 ins_pipe(ialu_cconly_reg_reg); 9101 %} 9102 9103 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{ 9104 match(Set xcc (CmpL op1 con)); 9105 effect( DEF xcc, USE op1, USE con ); 9106 9107 size(4); 9108 format %{ "CMP $op1,$con\t\t! long" %} 9109 opcode(Assembler::subcc_op3, Assembler::arith_op); 9110 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 9111 ins_pipe(ialu_cconly_reg_reg); 9112 %} 9113 9114 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ 9115 match(Set xcc (CmpL (AndL op1 op2) zero)); 9116 effect( DEF xcc, USE op1, USE op2 ); 9117 9118 size(4); 9119 format %{ "BTST $op1,$op2\t\t! long" %} 9120 opcode(Assembler::andcc_op3, Assembler::arith_op); 9121 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9122 ins_pipe(ialu_cconly_reg_reg); 9123 %} 9124 9125 // useful for checking the alignment of a pointer: 9126 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{ 9127 match(Set xcc (CmpL (AndL op1 con) zero)); 9128 effect( DEF xcc, USE op1, USE con ); 9129 9130 size(4); 9131 format %{ "BTST $op1,$con\t\t! long" %} 9132 opcode(Assembler::andcc_op3, Assembler::arith_op); 9133 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 9134 ins_pipe(ialu_cconly_reg_reg); 9135 %} 9136 9137 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{ 9138 match(Set icc (CmpU op1 op2)); 9139 9140 size(4); 9141 format %{ "CMP $op1,$op2\t! unsigned" %} 9142 opcode(Assembler::subcc_op3, Assembler::arith_op); 9143 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9144 ins_pipe(ialu_cconly_reg_imm); 9145 %} 9146 9147 // Compare Pointers 9148 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{ 9149 match(Set pcc (CmpP op1 op2)); 9150 9151 size(4); 9152 format %{ "CMP $op1,$op2\t! ptr" %} 9153 opcode(Assembler::subcc_op3, Assembler::arith_op); 9154 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9155 ins_pipe(ialu_cconly_reg_reg); 9156 %} 9157 9158 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{ 9159 match(Set pcc (CmpP op1 op2)); 9160 9161 size(4); 9162 format %{ "CMP $op1,$op2\t! ptr" %} 9163 opcode(Assembler::subcc_op3, Assembler::arith_op); 9164 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9165 ins_pipe(ialu_cconly_reg_imm); 9166 %} 9167 9168 // Compare Narrow oops 9169 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{ 9170 match(Set icc (CmpN op1 op2)); 9171 9172 size(4); 9173 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9174 opcode(Assembler::subcc_op3, Assembler::arith_op); 9175 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9176 ins_pipe(ialu_cconly_reg_reg); 9177 %} 9178 9179 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{ 9180 match(Set icc (CmpN op1 op2)); 9181 9182 size(4); 9183 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9184 opcode(Assembler::subcc_op3, Assembler::arith_op); 9185 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9186 ins_pipe(ialu_cconly_reg_imm); 9187 %} 9188 9189 //----------Max and Min-------------------------------------------------------- 9190 // Min Instructions 9191 // Conditional move for min 9192 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9193 effect( USE_DEF op2, USE op1, USE icc ); 9194 9195 size(4); 9196 format %{ "MOVlt icc,$op1,$op2\t! min" %} 9197 opcode(Assembler::less); 9198 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9199 ins_pipe(ialu_reg_flags); 9200 %} 9201 9202 // Min Register with Register. 9203 instruct minI_eReg(iRegI op1, iRegI op2) %{ 9204 match(Set op2 (MinI op1 op2)); 9205 ins_cost(DEFAULT_COST*2); 9206 expand %{ 9207 flagsReg icc; 9208 compI_iReg(icc,op1,op2); 9209 cmovI_reg_lt(op2,op1,icc); 9210 %} 9211 %} 9212 9213 // Max Instructions 9214 // Conditional move for max 9215 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9216 effect( USE_DEF op2, USE op1, USE icc ); 9217 format %{ "MOVgt icc,$op1,$op2\t! max" %} 9218 opcode(Assembler::greater); 9219 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9220 ins_pipe(ialu_reg_flags); 9221 %} 9222 9223 // Max Register with Register 9224 instruct maxI_eReg(iRegI op1, iRegI op2) %{ 9225 match(Set op2 (MaxI op1 op2)); 9226 ins_cost(DEFAULT_COST*2); 9227 expand %{ 9228 flagsReg icc; 9229 compI_iReg(icc,op1,op2); 9230 cmovI_reg_gt(op2,op1,icc); 9231 %} 9232 %} 9233 9234 9235 //----------Float Compares---------------------------------------------------- 9236 // Compare floating, generate condition code 9237 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{ 9238 match(Set fcc (CmpF src1 src2)); 9239 9240 size(4); 9241 format %{ "FCMPs $fcc,$src1,$src2" %} 9242 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); 9243 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) ); 9244 ins_pipe(faddF_fcc_reg_reg_zero); 9245 %} 9246 9247 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{ 9248 match(Set fcc (CmpD src1 src2)); 9249 9250 size(4); 9251 format %{ "FCMPd $fcc,$src1,$src2" %} 9252 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); 9253 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) ); 9254 ins_pipe(faddD_fcc_reg_reg_zero); 9255 %} 9256 9257 9258 // Compare floating, generate -1,0,1 9259 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{ 9260 match(Set dst (CmpF3 src1 src2)); 9261 effect(KILL fcc0); 9262 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9263 format %{ "fcmpl $dst,$src1,$src2" %} 9264 // Primary = float 9265 opcode( true ); 9266 ins_encode( floating_cmp( dst, src1, src2 ) ); 9267 ins_pipe( floating_cmp ); 9268 %} 9269 9270 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{ 9271 match(Set dst (CmpD3 src1 src2)); 9272 effect(KILL fcc0); 9273 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9274 format %{ "dcmpl $dst,$src1,$src2" %} 9275 // Primary = double (not float) 9276 opcode( false ); 9277 ins_encode( floating_cmp( dst, src1, src2 ) ); 9278 ins_pipe( floating_cmp ); 9279 %} 9280 9281 //----------Branches--------------------------------------------------------- 9282 // Jump 9283 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above) 9284 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{ 9285 match(Jump switch_val); 9286 9287 ins_cost(350); 9288 9289 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t" 9290 "LD [O7 + $switch_val], O7\n\t" 9291 "JUMP O7" %} 9292 ins_encode %{ 9293 // Calculate table address into a register. 9294 Register table_reg; 9295 Register label_reg = O7; 9296 // If we are calculating the size of this instruction don't trust 9297 // zero offsets because they might change when 9298 // MachConstantBaseNode decides to optimize the constant table 9299 // base. 9300 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) { 9301 table_reg = $constanttablebase; 9302 } else { 9303 table_reg = O7; 9304 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7); 9305 __ add($constanttablebase, con_offset, table_reg); 9306 } 9307 9308 // Jump to base address + switch value 9309 __ ld_ptr(table_reg, $switch_val$$Register, label_reg); 9310 __ jmp(label_reg, G0); 9311 __ delayed()->nop(); 9312 %} 9313 ins_pipe(ialu_reg_reg); 9314 %} 9315 9316 // Direct Branch. Use V8 version with longer range. 9317 instruct branch(label labl) %{ 9318 match(Goto); 9319 effect(USE labl); 9320 9321 size(8); 9322 ins_cost(BRANCH_COST); 9323 format %{ "BA $labl" %} 9324 ins_encode %{ 9325 Label* L = $labl$$label; 9326 __ ba(*L); 9327 __ delayed()->nop(); 9328 %} 9329 ins_pipe(br); 9330 %} 9331 9332 // Direct Branch, short with no delay slot 9333 instruct branch_short(label labl) %{ 9334 match(Goto); 9335 predicate(UseCBCond); 9336 effect(USE labl); 9337 9338 size(4); 9339 ins_cost(BRANCH_COST); 9340 format %{ "BA $labl\t! short branch" %} 9341 ins_encode %{ 9342 Label* L = $labl$$label; 9343 assert(__ use_cbcond(*L), "back to back cbcond"); 9344 __ ba_short(*L); 9345 %} 9346 ins_short_branch(1); 9347 ins_avoid_back_to_back(1); 9348 ins_pipe(cbcond_reg_imm); 9349 %} 9350 9351 // Conditional Direct Branch 9352 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{ 9353 match(If cmp icc); 9354 effect(USE labl); 9355 9356 size(8); 9357 ins_cost(BRANCH_COST); 9358 format %{ "BP$cmp $icc,$labl" %} 9359 // Prim = bits 24-22, Secnd = bits 31-30 9360 ins_encode( enc_bp( labl, cmp, icc ) ); 9361 ins_pipe(br_cc); 9362 %} 9363 9364 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9365 match(If cmp icc); 9366 effect(USE labl); 9367 9368 ins_cost(BRANCH_COST); 9369 format %{ "BP$cmp $icc,$labl" %} 9370 // Prim = bits 24-22, Secnd = bits 31-30 9371 ins_encode( enc_bp( labl, cmp, icc ) ); 9372 ins_pipe(br_cc); 9373 %} 9374 9375 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{ 9376 match(If cmp pcc); 9377 effect(USE labl); 9378 9379 size(8); 9380 ins_cost(BRANCH_COST); 9381 format %{ "BP$cmp $pcc,$labl" %} 9382 ins_encode %{ 9383 Label* L = $labl$$label; 9384 Assembler::Predict predict_taken = 9385 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9386 9387 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9388 __ delayed()->nop(); 9389 %} 9390 ins_pipe(br_cc); 9391 %} 9392 9393 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{ 9394 match(If cmp fcc); 9395 effect(USE labl); 9396 9397 size(8); 9398 ins_cost(BRANCH_COST); 9399 format %{ "FBP$cmp $fcc,$labl" %} 9400 ins_encode %{ 9401 Label* L = $labl$$label; 9402 Assembler::Predict predict_taken = 9403 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9404 9405 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L); 9406 __ delayed()->nop(); 9407 %} 9408 ins_pipe(br_fcc); 9409 %} 9410 9411 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{ 9412 match(CountedLoopEnd cmp icc); 9413 effect(USE labl); 9414 9415 size(8); 9416 ins_cost(BRANCH_COST); 9417 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9418 // Prim = bits 24-22, Secnd = bits 31-30 9419 ins_encode( enc_bp( labl, cmp, icc ) ); 9420 ins_pipe(br_cc); 9421 %} 9422 9423 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9424 match(CountedLoopEnd cmp icc); 9425 effect(USE labl); 9426 9427 size(8); 9428 ins_cost(BRANCH_COST); 9429 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9430 // Prim = bits 24-22, Secnd = bits 31-30 9431 ins_encode( enc_bp( labl, cmp, icc ) ); 9432 ins_pipe(br_cc); 9433 %} 9434 9435 // Compare and branch instructions 9436 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9437 match(If cmp (CmpI op1 op2)); 9438 effect(USE labl, KILL icc); 9439 9440 size(12); 9441 ins_cost(BRANCH_COST); 9442 format %{ "CMP $op1,$op2\t! int\n\t" 9443 "BP$cmp $labl" %} 9444 ins_encode %{ 9445 Label* L = $labl$$label; 9446 Assembler::Predict predict_taken = 9447 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9448 __ cmp($op1$$Register, $op2$$Register); 9449 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9450 __ delayed()->nop(); 9451 %} 9452 ins_pipe(cmp_br_reg_reg); 9453 %} 9454 9455 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9456 match(If cmp (CmpI op1 op2)); 9457 effect(USE labl, KILL icc); 9458 9459 size(12); 9460 ins_cost(BRANCH_COST); 9461 format %{ "CMP $op1,$op2\t! int\n\t" 9462 "BP$cmp $labl" %} 9463 ins_encode %{ 9464 Label* L = $labl$$label; 9465 Assembler::Predict predict_taken = 9466 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9467 __ cmp($op1$$Register, $op2$$constant); 9468 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9469 __ delayed()->nop(); 9470 %} 9471 ins_pipe(cmp_br_reg_imm); 9472 %} 9473 9474 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9475 match(If cmp (CmpU op1 op2)); 9476 effect(USE labl, KILL icc); 9477 9478 size(12); 9479 ins_cost(BRANCH_COST); 9480 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9481 "BP$cmp $labl" %} 9482 ins_encode %{ 9483 Label* L = $labl$$label; 9484 Assembler::Predict predict_taken = 9485 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9486 __ cmp($op1$$Register, $op2$$Register); 9487 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9488 __ delayed()->nop(); 9489 %} 9490 ins_pipe(cmp_br_reg_reg); 9491 %} 9492 9493 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9494 match(If cmp (CmpU op1 op2)); 9495 effect(USE labl, KILL icc); 9496 9497 size(12); 9498 ins_cost(BRANCH_COST); 9499 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9500 "BP$cmp $labl" %} 9501 ins_encode %{ 9502 Label* L = $labl$$label; 9503 Assembler::Predict predict_taken = 9504 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9505 __ cmp($op1$$Register, $op2$$constant); 9506 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9507 __ delayed()->nop(); 9508 %} 9509 ins_pipe(cmp_br_reg_imm); 9510 %} 9511 9512 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9513 match(If cmp (CmpL op1 op2)); 9514 effect(USE labl, KILL xcc); 9515 9516 size(12); 9517 ins_cost(BRANCH_COST); 9518 format %{ "CMP $op1,$op2\t! long\n\t" 9519 "BP$cmp $labl" %} 9520 ins_encode %{ 9521 Label* L = $labl$$label; 9522 Assembler::Predict predict_taken = 9523 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9524 __ cmp($op1$$Register, $op2$$Register); 9525 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9526 __ delayed()->nop(); 9527 %} 9528 ins_pipe(cmp_br_reg_reg); 9529 %} 9530 9531 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9532 match(If cmp (CmpL op1 op2)); 9533 effect(USE labl, KILL xcc); 9534 9535 size(12); 9536 ins_cost(BRANCH_COST); 9537 format %{ "CMP $op1,$op2\t! long\n\t" 9538 "BP$cmp $labl" %} 9539 ins_encode %{ 9540 Label* L = $labl$$label; 9541 Assembler::Predict predict_taken = 9542 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9543 __ cmp($op1$$Register, $op2$$constant); 9544 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9545 __ delayed()->nop(); 9546 %} 9547 ins_pipe(cmp_br_reg_imm); 9548 %} 9549 9550 // Compare Pointers and branch 9551 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9552 match(If cmp (CmpP op1 op2)); 9553 effect(USE labl, KILL pcc); 9554 9555 size(12); 9556 ins_cost(BRANCH_COST); 9557 format %{ "CMP $op1,$op2\t! ptr\n\t" 9558 "B$cmp $labl" %} 9559 ins_encode %{ 9560 Label* L = $labl$$label; 9561 Assembler::Predict predict_taken = 9562 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9563 __ cmp($op1$$Register, $op2$$Register); 9564 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9565 __ delayed()->nop(); 9566 %} 9567 ins_pipe(cmp_br_reg_reg); 9568 %} 9569 9570 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9571 match(If cmp (CmpP op1 null)); 9572 effect(USE labl, KILL pcc); 9573 9574 size(12); 9575 ins_cost(BRANCH_COST); 9576 format %{ "CMP $op1,0\t! ptr\n\t" 9577 "B$cmp $labl" %} 9578 ins_encode %{ 9579 Label* L = $labl$$label; 9580 Assembler::Predict predict_taken = 9581 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9582 __ cmp($op1$$Register, G0); 9583 // bpr() is not used here since it has shorter distance. 9584 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9585 __ delayed()->nop(); 9586 %} 9587 ins_pipe(cmp_br_reg_reg); 9588 %} 9589 9590 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9591 match(If cmp (CmpN op1 op2)); 9592 effect(USE labl, KILL icc); 9593 9594 size(12); 9595 ins_cost(BRANCH_COST); 9596 format %{ "CMP $op1,$op2\t! compressed ptr\n\t" 9597 "BP$cmp $labl" %} 9598 ins_encode %{ 9599 Label* L = $labl$$label; 9600 Assembler::Predict predict_taken = 9601 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9602 __ cmp($op1$$Register, $op2$$Register); 9603 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9604 __ delayed()->nop(); 9605 %} 9606 ins_pipe(cmp_br_reg_reg); 9607 %} 9608 9609 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9610 match(If cmp (CmpN op1 null)); 9611 effect(USE labl, KILL icc); 9612 9613 size(12); 9614 ins_cost(BRANCH_COST); 9615 format %{ "CMP $op1,0\t! compressed ptr\n\t" 9616 "BP$cmp $labl" %} 9617 ins_encode %{ 9618 Label* L = $labl$$label; 9619 Assembler::Predict predict_taken = 9620 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9621 __ cmp($op1$$Register, G0); 9622 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9623 __ delayed()->nop(); 9624 %} 9625 ins_pipe(cmp_br_reg_reg); 9626 %} 9627 9628 // Loop back branch 9629 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9630 match(CountedLoopEnd cmp (CmpI op1 op2)); 9631 effect(USE labl, KILL icc); 9632 9633 size(12); 9634 ins_cost(BRANCH_COST); 9635 format %{ "CMP $op1,$op2\t! int\n\t" 9636 "BP$cmp $labl\t! Loop end" %} 9637 ins_encode %{ 9638 Label* L = $labl$$label; 9639 Assembler::Predict predict_taken = 9640 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9641 __ cmp($op1$$Register, $op2$$Register); 9642 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9643 __ delayed()->nop(); 9644 %} 9645 ins_pipe(cmp_br_reg_reg); 9646 %} 9647 9648 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9649 match(CountedLoopEnd cmp (CmpI op1 op2)); 9650 effect(USE labl, KILL icc); 9651 9652 size(12); 9653 ins_cost(BRANCH_COST); 9654 format %{ "CMP $op1,$op2\t! int\n\t" 9655 "BP$cmp $labl\t! Loop end" %} 9656 ins_encode %{ 9657 Label* L = $labl$$label; 9658 Assembler::Predict predict_taken = 9659 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9660 __ cmp($op1$$Register, $op2$$constant); 9661 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9662 __ delayed()->nop(); 9663 %} 9664 ins_pipe(cmp_br_reg_imm); 9665 %} 9666 9667 // Short compare and branch instructions 9668 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9669 match(If cmp (CmpI op1 op2)); 9670 predicate(UseCBCond); 9671 effect(USE labl, KILL icc); 9672 9673 size(4); 9674 ins_cost(BRANCH_COST); 9675 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %} 9676 ins_encode %{ 9677 Label* L = $labl$$label; 9678 assert(__ use_cbcond(*L), "back to back cbcond"); 9679 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9680 %} 9681 ins_short_branch(1); 9682 ins_avoid_back_to_back(1); 9683 ins_pipe(cbcond_reg_reg); 9684 %} 9685 9686 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9687 match(If cmp (CmpI op1 op2)); 9688 predicate(UseCBCond); 9689 effect(USE labl, KILL icc); 9690 9691 size(4); 9692 ins_cost(BRANCH_COST); 9693 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %} 9694 ins_encode %{ 9695 Label* L = $labl$$label; 9696 assert(__ use_cbcond(*L), "back to back cbcond"); 9697 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9698 %} 9699 ins_short_branch(1); 9700 ins_avoid_back_to_back(1); 9701 ins_pipe(cbcond_reg_imm); 9702 %} 9703 9704 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9705 match(If cmp (CmpU op1 op2)); 9706 predicate(UseCBCond); 9707 effect(USE labl, KILL icc); 9708 9709 size(4); 9710 ins_cost(BRANCH_COST); 9711 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %} 9712 ins_encode %{ 9713 Label* L = $labl$$label; 9714 assert(__ use_cbcond(*L), "back to back cbcond"); 9715 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9716 %} 9717 ins_short_branch(1); 9718 ins_avoid_back_to_back(1); 9719 ins_pipe(cbcond_reg_reg); 9720 %} 9721 9722 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9723 match(If cmp (CmpU op1 op2)); 9724 predicate(UseCBCond); 9725 effect(USE labl, KILL icc); 9726 9727 size(4); 9728 ins_cost(BRANCH_COST); 9729 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %} 9730 ins_encode %{ 9731 Label* L = $labl$$label; 9732 assert(__ use_cbcond(*L), "back to back cbcond"); 9733 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9734 %} 9735 ins_short_branch(1); 9736 ins_avoid_back_to_back(1); 9737 ins_pipe(cbcond_reg_imm); 9738 %} 9739 9740 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9741 match(If cmp (CmpL op1 op2)); 9742 predicate(UseCBCond); 9743 effect(USE labl, KILL xcc); 9744 9745 size(4); 9746 ins_cost(BRANCH_COST); 9747 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %} 9748 ins_encode %{ 9749 Label* L = $labl$$label; 9750 assert(__ use_cbcond(*L), "back to back cbcond"); 9751 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L); 9752 %} 9753 ins_short_branch(1); 9754 ins_avoid_back_to_back(1); 9755 ins_pipe(cbcond_reg_reg); 9756 %} 9757 9758 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9759 match(If cmp (CmpL op1 op2)); 9760 predicate(UseCBCond); 9761 effect(USE labl, KILL xcc); 9762 9763 size(4); 9764 ins_cost(BRANCH_COST); 9765 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %} 9766 ins_encode %{ 9767 Label* L = $labl$$label; 9768 assert(__ use_cbcond(*L), "back to back cbcond"); 9769 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L); 9770 %} 9771 ins_short_branch(1); 9772 ins_avoid_back_to_back(1); 9773 ins_pipe(cbcond_reg_imm); 9774 %} 9775 9776 // Compare Pointers and branch 9777 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9778 match(If cmp (CmpP op1 op2)); 9779 predicate(UseCBCond); 9780 effect(USE labl, KILL pcc); 9781 9782 size(4); 9783 ins_cost(BRANCH_COST); 9784 #ifdef _LP64 9785 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %} 9786 #else 9787 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %} 9788 #endif 9789 ins_encode %{ 9790 Label* L = $labl$$label; 9791 assert(__ use_cbcond(*L), "back to back cbcond"); 9792 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L); 9793 %} 9794 ins_short_branch(1); 9795 ins_avoid_back_to_back(1); 9796 ins_pipe(cbcond_reg_reg); 9797 %} 9798 9799 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9800 match(If cmp (CmpP op1 null)); 9801 predicate(UseCBCond); 9802 effect(USE labl, KILL pcc); 9803 9804 size(4); 9805 ins_cost(BRANCH_COST); 9806 #ifdef _LP64 9807 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %} 9808 #else 9809 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %} 9810 #endif 9811 ins_encode %{ 9812 Label* L = $labl$$label; 9813 assert(__ use_cbcond(*L), "back to back cbcond"); 9814 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L); 9815 %} 9816 ins_short_branch(1); 9817 ins_avoid_back_to_back(1); 9818 ins_pipe(cbcond_reg_reg); 9819 %} 9820 9821 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9822 match(If cmp (CmpN op1 op2)); 9823 predicate(UseCBCond); 9824 effect(USE labl, KILL icc); 9825 9826 size(4); 9827 ins_cost(BRANCH_COST); 9828 format %{ "CWB$cmp $op1,op2,$labl\t! compressed ptr" %} 9829 ins_encode %{ 9830 Label* L = $labl$$label; 9831 assert(__ use_cbcond(*L), "back to back cbcond"); 9832 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9833 %} 9834 ins_short_branch(1); 9835 ins_avoid_back_to_back(1); 9836 ins_pipe(cbcond_reg_reg); 9837 %} 9838 9839 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9840 match(If cmp (CmpN op1 null)); 9841 predicate(UseCBCond); 9842 effect(USE labl, KILL icc); 9843 9844 size(4); 9845 ins_cost(BRANCH_COST); 9846 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %} 9847 ins_encode %{ 9848 Label* L = $labl$$label; 9849 assert(__ use_cbcond(*L), "back to back cbcond"); 9850 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L); 9851 %} 9852 ins_short_branch(1); 9853 ins_avoid_back_to_back(1); 9854 ins_pipe(cbcond_reg_reg); 9855 %} 9856 9857 // Loop back branch 9858 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9859 match(CountedLoopEnd cmp (CmpI op1 op2)); 9860 predicate(UseCBCond); 9861 effect(USE labl, KILL icc); 9862 9863 size(4); 9864 ins_cost(BRANCH_COST); 9865 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %} 9866 ins_encode %{ 9867 Label* L = $labl$$label; 9868 assert(__ use_cbcond(*L), "back to back cbcond"); 9869 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9870 %} 9871 ins_short_branch(1); 9872 ins_avoid_back_to_back(1); 9873 ins_pipe(cbcond_reg_reg); 9874 %} 9875 9876 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9877 match(CountedLoopEnd cmp (CmpI op1 op2)); 9878 predicate(UseCBCond); 9879 effect(USE labl, KILL icc); 9880 9881 size(4); 9882 ins_cost(BRANCH_COST); 9883 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %} 9884 ins_encode %{ 9885 Label* L = $labl$$label; 9886 assert(__ use_cbcond(*L), "back to back cbcond"); 9887 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9888 %} 9889 ins_short_branch(1); 9890 ins_avoid_back_to_back(1); 9891 ins_pipe(cbcond_reg_imm); 9892 %} 9893 9894 // Branch-on-register tests all 64 bits. We assume that values 9895 // in 64-bit registers always remains zero or sign extended 9896 // unless our code munges the high bits. Interrupts can chop 9897 // the high order bits to zero or sign at any time. 9898 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{ 9899 match(If cmp (CmpI op1 zero)); 9900 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9901 effect(USE labl); 9902 9903 size(8); 9904 ins_cost(BRANCH_COST); 9905 format %{ "BR$cmp $op1,$labl" %} 9906 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9907 ins_pipe(br_reg); 9908 %} 9909 9910 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{ 9911 match(If cmp (CmpP op1 null)); 9912 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9913 effect(USE labl); 9914 9915 size(8); 9916 ins_cost(BRANCH_COST); 9917 format %{ "BR$cmp $op1,$labl" %} 9918 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9919 ins_pipe(br_reg); 9920 %} 9921 9922 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{ 9923 match(If cmp (CmpL op1 zero)); 9924 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9925 effect(USE labl); 9926 9927 size(8); 9928 ins_cost(BRANCH_COST); 9929 format %{ "BR$cmp $op1,$labl" %} 9930 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9931 ins_pipe(br_reg); 9932 %} 9933 9934 9935 // ============================================================================ 9936 // Long Compare 9937 // 9938 // Currently we hold longs in 2 registers. Comparing such values efficiently 9939 // is tricky. The flavor of compare used depends on whether we are testing 9940 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit. 9941 // The GE test is the negated LT test. The LE test can be had by commuting 9942 // the operands (yielding a GE test) and then negating; negate again for the 9943 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the 9944 // NE test is negated from that. 9945 9946 // Due to a shortcoming in the ADLC, it mixes up expressions like: 9947 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the 9948 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections 9949 // are collapsed internally in the ADLC's dfa-gen code. The match for 9950 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the 9951 // foo match ends up with the wrong leaf. One fix is to not match both 9952 // reg-reg and reg-zero forms of long-compare. This is unfortunate because 9953 // both forms beat the trinary form of long-compare and both are very useful 9954 // on Intel which has so few registers. 9955 9956 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{ 9957 match(If cmp xcc); 9958 effect(USE labl); 9959 9960 size(8); 9961 ins_cost(BRANCH_COST); 9962 format %{ "BP$cmp $xcc,$labl" %} 9963 ins_encode %{ 9964 Label* L = $labl$$label; 9965 Assembler::Predict predict_taken = 9966 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9967 9968 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9969 __ delayed()->nop(); 9970 %} 9971 ins_pipe(br_cc); 9972 %} 9973 9974 // Manifest a CmpL3 result in an integer register. Very painful. 9975 // This is the test to avoid. 9976 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{ 9977 match(Set dst (CmpL3 src1 src2) ); 9978 effect( KILL ccr ); 9979 ins_cost(6*DEFAULT_COST); 9980 size(24); 9981 format %{ "CMP $src1,$src2\t\t! long\n" 9982 "\tBLT,a,pn done\n" 9983 "\tMOV -1,$dst\t! delay slot\n" 9984 "\tBGT,a,pn done\n" 9985 "\tMOV 1,$dst\t! delay slot\n" 9986 "\tCLR $dst\n" 9987 "done:" %} 9988 ins_encode( cmpl_flag(src1,src2,dst) ); 9989 ins_pipe(cmpL_reg); 9990 %} 9991 9992 // Conditional move 9993 instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{ 9994 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src))); 9995 ins_cost(150); 9996 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %} 9997 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 9998 ins_pipe(ialu_reg); 9999 %} 10000 10001 instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{ 10002 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src))); 10003 ins_cost(140); 10004 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %} 10005 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) ); 10006 ins_pipe(ialu_imm); 10007 %} 10008 10009 instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{ 10010 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src))); 10011 ins_cost(150); 10012 format %{ "MOV$cmp $xcc,$src,$dst" %} 10013 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 10014 ins_pipe(ialu_reg); 10015 %} 10016 10017 instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{ 10018 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src))); 10019 ins_cost(140); 10020 format %{ "MOV$cmp $xcc,$src,$dst" %} 10021 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) ); 10022 ins_pipe(ialu_imm); 10023 %} 10024 10025 instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{ 10026 match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src))); 10027 ins_cost(150); 10028 format %{ "MOV$cmp $xcc,$src,$dst" %} 10029 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 10030 ins_pipe(ialu_reg); 10031 %} 10032 10033 instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{ 10034 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src))); 10035 ins_cost(150); 10036 format %{ "MOV$cmp $xcc,$src,$dst" %} 10037 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 10038 ins_pipe(ialu_reg); 10039 %} 10040 10041 instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{ 10042 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src))); 10043 ins_cost(140); 10044 format %{ "MOV$cmp $xcc,$src,$dst" %} 10045 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) ); 10046 ins_pipe(ialu_imm); 10047 %} 10048 10049 instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{ 10050 match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src))); 10051 ins_cost(150); 10052 opcode(0x101); 10053 format %{ "FMOVS$cmp $xcc,$src,$dst" %} 10054 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) ); 10055 ins_pipe(int_conditional_float_move); 10056 %} 10057 10058 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{ 10059 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src))); 10060 ins_cost(150); 10061 opcode(0x102); 10062 format %{ "FMOVD$cmp $xcc,$src,$dst" %} 10063 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) ); 10064 ins_pipe(int_conditional_float_move); 10065 %} 10066 10067 // ============================================================================ 10068 // Safepoint Instruction 10069 instruct safePoint_poll(iRegP poll) %{ 10070 match(SafePoint poll); 10071 effect(USE poll); 10072 10073 size(4); 10074 #ifdef _LP64 10075 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %} 10076 #else 10077 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %} 10078 #endif 10079 ins_encode %{ 10080 __ relocate(relocInfo::poll_type); 10081 __ ld_ptr($poll$$Register, 0, G0); 10082 %} 10083 ins_pipe(loadPollP); 10084 %} 10085 10086 // ============================================================================ 10087 // Call Instructions 10088 // Call Java Static Instruction 10089 instruct CallStaticJavaDirect( method meth ) %{ 10090 match(CallStaticJava); 10091 predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke()); 10092 effect(USE meth); 10093 10094 size(8); 10095 ins_cost(CALL_COST); 10096 format %{ "CALL,static ; NOP ==> " %} 10097 ins_encode( Java_Static_Call( meth ), call_epilog ); 10098 ins_pipe(simple_call); 10099 %} 10100 10101 // Call Java Static Instruction (method handle version) 10102 instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{ 10103 match(CallStaticJava); 10104 predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke()); 10105 effect(USE meth, KILL l7_mh_SP_save); 10106 10107 size(16); 10108 ins_cost(CALL_COST); 10109 format %{ "CALL,static/MethodHandle" %} 10110 ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog); 10111 ins_pipe(simple_call); 10112 %} 10113 10114 // Call Java Dynamic Instruction 10115 instruct CallDynamicJavaDirect( method meth ) %{ 10116 match(CallDynamicJava); 10117 effect(USE meth); 10118 10119 ins_cost(CALL_COST); 10120 format %{ "SET (empty),R_G5\n\t" 10121 "CALL,dynamic ; NOP ==> " %} 10122 ins_encode( Java_Dynamic_Call( meth ), call_epilog ); 10123 ins_pipe(call); 10124 %} 10125 10126 // Call Runtime Instruction 10127 instruct CallRuntimeDirect(method meth, l7RegP l7) %{ 10128 match(CallRuntime); 10129 effect(USE meth, KILL l7); 10130 ins_cost(CALL_COST); 10131 format %{ "CALL,runtime" %} 10132 ins_encode( Java_To_Runtime( meth ), 10133 call_epilog, adjust_long_from_native_call ); 10134 ins_pipe(simple_call); 10135 %} 10136 10137 // Call runtime without safepoint - same as CallRuntime 10138 instruct CallLeafDirect(method meth, l7RegP l7) %{ 10139 match(CallLeaf); 10140 effect(USE meth, KILL l7); 10141 ins_cost(CALL_COST); 10142 format %{ "CALL,runtime leaf" %} 10143 ins_encode( Java_To_Runtime( meth ), 10144 call_epilog, 10145 adjust_long_from_native_call ); 10146 ins_pipe(simple_call); 10147 %} 10148 10149 // Call runtime without safepoint - same as CallLeaf 10150 instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{ 10151 match(CallLeafNoFP); 10152 effect(USE meth, KILL l7); 10153 ins_cost(CALL_COST); 10154 format %{ "CALL,runtime leaf nofp" %} 10155 ins_encode( Java_To_Runtime( meth ), 10156 call_epilog, 10157 adjust_long_from_native_call ); 10158 ins_pipe(simple_call); 10159 %} 10160 10161 // Tail Call; Jump from runtime stub to Java code. 10162 // Also known as an 'interprocedural jump'. 10163 // Target of jump will eventually return to caller. 10164 // TailJump below removes the return address. 10165 instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{ 10166 match(TailCall jump_target method_oop ); 10167 10168 ins_cost(CALL_COST); 10169 format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %} 10170 ins_encode(form_jmpl(jump_target)); 10171 ins_pipe(tail_call); 10172 %} 10173 10174 10175 // Return Instruction 10176 instruct Ret() %{ 10177 match(Return); 10178 10179 // The epilogue node did the ret already. 10180 size(0); 10181 format %{ "! return" %} 10182 ins_encode(); 10183 ins_pipe(empty); 10184 %} 10185 10186 10187 // Tail Jump; remove the return address; jump to target. 10188 // TailCall above leaves the return address around. 10189 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2). 10190 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a 10191 // "restore" before this instruction (in Epilogue), we need to materialize it 10192 // in %i0. 10193 instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{ 10194 match( TailJump jump_target ex_oop ); 10195 ins_cost(CALL_COST); 10196 format %{ "! discard R_O7\n\t" 10197 "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %} 10198 ins_encode(form_jmpl_set_exception_pc(jump_target)); 10199 // opcode(Assembler::jmpl_op3, Assembler::arith_op); 10200 // The hack duplicates the exception oop into G3, so that CreateEx can use it there. 10201 // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() ); 10202 ins_pipe(tail_call); 10203 %} 10204 10205 // Create exception oop: created by stack-crawling runtime code. 10206 // Created exception is now available to this handler, and is setup 10207 // just prior to jumping to this handler. No code emitted. 10208 instruct CreateException( o0RegP ex_oop ) 10209 %{ 10210 match(Set ex_oop (CreateEx)); 10211 ins_cost(0); 10212 10213 size(0); 10214 // use the following format syntax 10215 format %{ "! exception oop is in R_O0; no code emitted" %} 10216 ins_encode(); 10217 ins_pipe(empty); 10218 %} 10219 10220 10221 // Rethrow exception: 10222 // The exception oop will come in the first argument position. 10223 // Then JUMP (not call) to the rethrow stub code. 10224 instruct RethrowException() 10225 %{ 10226 match(Rethrow); 10227 ins_cost(CALL_COST); 10228 10229 // use the following format syntax 10230 format %{ "Jmp rethrow_stub" %} 10231 ins_encode(enc_rethrow); 10232 ins_pipe(tail_call); 10233 %} 10234 10235 10236 // Die now 10237 instruct ShouldNotReachHere( ) 10238 %{ 10239 match(Halt); 10240 ins_cost(CALL_COST); 10241 10242 size(4); 10243 // Use the following format syntax 10244 format %{ "ILLTRAP ; ShouldNotReachHere" %} 10245 ins_encode( form2_illtrap() ); 10246 ins_pipe(tail_call); 10247 %} 10248 10249 // ============================================================================ 10250 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass 10251 // array for an instance of the superklass. Set a hidden internal cache on a 10252 // hit (cache is checked with exposed code in gen_subtype_check()). Return 10253 // not zero for a miss or zero for a hit. The encoding ALSO sets flags. 10254 instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{ 10255 match(Set index (PartialSubtypeCheck sub super)); 10256 effect( KILL pcc, KILL o7 ); 10257 ins_cost(DEFAULT_COST*10); 10258 format %{ "CALL PartialSubtypeCheck\n\tNOP" %} 10259 ins_encode( enc_PartialSubtypeCheck() ); 10260 ins_pipe(partial_subtype_check_pipe); 10261 %} 10262 10263 instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{ 10264 match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero)); 10265 effect( KILL idx, KILL o7 ); 10266 ins_cost(DEFAULT_COST*10); 10267 format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %} 10268 ins_encode( enc_PartialSubtypeCheck() ); 10269 ins_pipe(partial_subtype_check_pipe); 10270 %} 10271 10272 10273 // ============================================================================ 10274 // inlined locking and unlocking 10275 10276 instruct cmpFastLock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{ 10277 match(Set pcc (FastLock object box)); 10278 10279 effect(KILL scratch, TEMP scratch2); 10280 ins_cost(100); 10281 10282 format %{ "FASTLOCK $object, $box; KILL $scratch, $scratch2, $box" %} 10283 ins_encode( Fast_Lock(object, box, scratch, scratch2) ); 10284 ins_pipe(long_memory_op); 10285 %} 10286 10287 10288 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, iRegP box, iRegP scratch2, o7RegP scratch ) %{ 10289 match(Set pcc (FastUnlock object box)); 10290 effect(KILL scratch, TEMP scratch2); 10291 ins_cost(100); 10292 10293 format %{ "FASTUNLOCK $object, $box; KILL $scratch, $scratch2, $box" %} 10294 ins_encode( Fast_Unlock(object, box, scratch, scratch2) ); 10295 ins_pipe(long_memory_op); 10296 %} 10297 10298 // The encodings are generic. 10299 instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{ 10300 predicate(!use_block_zeroing(n->in(2)) ); 10301 match(Set dummy (ClearArray cnt base)); 10302 effect(TEMP temp, KILL ccr); 10303 ins_cost(300); 10304 format %{ "MOV $cnt,$temp\n" 10305 "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n" 10306 " BRge loop\t\t! Clearing loop\n" 10307 " STX G0,[$base+$temp]\t! delay slot" %} 10308 10309 ins_encode %{ 10310 // Compiler ensures base is doubleword aligned and cnt is count of doublewords 10311 Register nof_bytes_arg = $cnt$$Register; 10312 Register nof_bytes_tmp = $temp$$Register; 10313 Register base_pointer_arg = $base$$Register; 10314 10315 Label loop; 10316 __ mov(nof_bytes_arg, nof_bytes_tmp); 10317 10318 // Loop and clear, walking backwards through the array. 10319 // nof_bytes_tmp (if >0) is always the number of bytes to zero 10320 __ bind(loop); 10321 __ deccc(nof_bytes_tmp, 8); 10322 __ br(Assembler::greaterEqual, true, Assembler::pt, loop); 10323 __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp); 10324 // %%%% this mini-loop must not cross a cache boundary! 10325 %} 10326 ins_pipe(long_memory_op); 10327 %} 10328 10329 instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{ 10330 predicate(use_block_zeroing(n->in(2))); 10331 match(Set dummy (ClearArray cnt base)); 10332 effect(USE_KILL cnt, USE_KILL base, KILL ccr); 10333 ins_cost(300); 10334 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %} 10335 10336 ins_encode %{ 10337 10338 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation"); 10339 Register to = $base$$Register; 10340 Register count = $cnt$$Register; 10341 10342 Label Ldone; 10343 __ nop(); // Separate short branches 10344 // Use BIS for zeroing (temp is not used). 10345 __ bis_zeroing(to, count, G0, Ldone); 10346 __ bind(Ldone); 10347 10348 %} 10349 ins_pipe(long_memory_op); 10350 %} 10351 10352 instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{ 10353 predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit)); 10354 match(Set dummy (ClearArray cnt base)); 10355 effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr); 10356 ins_cost(300); 10357 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %} 10358 10359 ins_encode %{ 10360 10361 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation"); 10362 Register to = $base$$Register; 10363 Register count = $cnt$$Register; 10364 Register temp = $tmp$$Register; 10365 10366 Label Ldone; 10367 __ nop(); // Separate short branches 10368 // Use BIS for zeroing 10369 __ bis_zeroing(to, count, temp, Ldone); 10370 __ bind(Ldone); 10371 10372 %} 10373 ins_pipe(long_memory_op); 10374 %} 10375 10376 instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result, 10377 o7RegI tmp, flagsReg ccr) %{ 10378 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 10379 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp); 10380 ins_cost(300); 10381 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %} 10382 ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) ); 10383 ins_pipe(long_memory_op); 10384 %} 10385 10386 instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result, 10387 o7RegI tmp, flagsReg ccr) %{ 10388 match(Set result (StrEquals (Binary str1 str2) cnt)); 10389 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr); 10390 ins_cost(300); 10391 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %} 10392 ins_encode( enc_String_Equals(str1, str2, cnt, result) ); 10393 ins_pipe(long_memory_op); 10394 %} 10395 10396 instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result, 10397 o7RegI tmp2, flagsReg ccr) %{ 10398 match(Set result (AryEq ary1 ary2)); 10399 effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr); 10400 ins_cost(300); 10401 format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %} 10402 ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result)); 10403 ins_pipe(long_memory_op); 10404 %} 10405 10406 10407 //---------- Zeros Count Instructions ------------------------------------------ 10408 10409 instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{ 10410 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10411 match(Set dst (CountLeadingZerosI src)); 10412 effect(TEMP dst, TEMP tmp, KILL cr); 10413 10414 // x |= (x >> 1); 10415 // x |= (x >> 2); 10416 // x |= (x >> 4); 10417 // x |= (x >> 8); 10418 // x |= (x >> 16); 10419 // return (WORDBITS - popc(x)); 10420 format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t" 10421 "SRL $src,0,$dst\t! 32-bit zero extend\n\t" 10422 "OR $dst,$tmp,$dst\n\t" 10423 "SRL $dst,2,$tmp\n\t" 10424 "OR $dst,$tmp,$dst\n\t" 10425 "SRL $dst,4,$tmp\n\t" 10426 "OR $dst,$tmp,$dst\n\t" 10427 "SRL $dst,8,$tmp\n\t" 10428 "OR $dst,$tmp,$dst\n\t" 10429 "SRL $dst,16,$tmp\n\t" 10430 "OR $dst,$tmp,$dst\n\t" 10431 "POPC $dst,$dst\n\t" 10432 "MOV 32,$tmp\n\t" 10433 "SUB $tmp,$dst,$dst" %} 10434 ins_encode %{ 10435 Register Rdst = $dst$$Register; 10436 Register Rsrc = $src$$Register; 10437 Register Rtmp = $tmp$$Register; 10438 __ srl(Rsrc, 1, Rtmp); 10439 __ srl(Rsrc, 0, Rdst); 10440 __ or3(Rdst, Rtmp, Rdst); 10441 __ srl(Rdst, 2, Rtmp); 10442 __ or3(Rdst, Rtmp, Rdst); 10443 __ srl(Rdst, 4, Rtmp); 10444 __ or3(Rdst, Rtmp, Rdst); 10445 __ srl(Rdst, 8, Rtmp); 10446 __ or3(Rdst, Rtmp, Rdst); 10447 __ srl(Rdst, 16, Rtmp); 10448 __ or3(Rdst, Rtmp, Rdst); 10449 __ popc(Rdst, Rdst); 10450 __ mov(BitsPerInt, Rtmp); 10451 __ sub(Rtmp, Rdst, Rdst); 10452 %} 10453 ins_pipe(ialu_reg); 10454 %} 10455 10456 instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{ 10457 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10458 match(Set dst (CountLeadingZerosL src)); 10459 effect(TEMP dst, TEMP tmp, KILL cr); 10460 10461 // x |= (x >> 1); 10462 // x |= (x >> 2); 10463 // x |= (x >> 4); 10464 // x |= (x >> 8); 10465 // x |= (x >> 16); 10466 // x |= (x >> 32); 10467 // return (WORDBITS - popc(x)); 10468 format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t" 10469 "OR $src,$tmp,$dst\n\t" 10470 "SRLX $dst,2,$tmp\n\t" 10471 "OR $dst,$tmp,$dst\n\t" 10472 "SRLX $dst,4,$tmp\n\t" 10473 "OR $dst,$tmp,$dst\n\t" 10474 "SRLX $dst,8,$tmp\n\t" 10475 "OR $dst,$tmp,$dst\n\t" 10476 "SRLX $dst,16,$tmp\n\t" 10477 "OR $dst,$tmp,$dst\n\t" 10478 "SRLX $dst,32,$tmp\n\t" 10479 "OR $dst,$tmp,$dst\n\t" 10480 "POPC $dst,$dst\n\t" 10481 "MOV 64,$tmp\n\t" 10482 "SUB $tmp,$dst,$dst" %} 10483 ins_encode %{ 10484 Register Rdst = $dst$$Register; 10485 Register Rsrc = $src$$Register; 10486 Register Rtmp = $tmp$$Register; 10487 __ srlx(Rsrc, 1, Rtmp); 10488 __ or3( Rsrc, Rtmp, Rdst); 10489 __ srlx(Rdst, 2, Rtmp); 10490 __ or3( Rdst, Rtmp, Rdst); 10491 __ srlx(Rdst, 4, Rtmp); 10492 __ or3( Rdst, Rtmp, Rdst); 10493 __ srlx(Rdst, 8, Rtmp); 10494 __ or3( Rdst, Rtmp, Rdst); 10495 __ srlx(Rdst, 16, Rtmp); 10496 __ or3( Rdst, Rtmp, Rdst); 10497 __ srlx(Rdst, 32, Rtmp); 10498 __ or3( Rdst, Rtmp, Rdst); 10499 __ popc(Rdst, Rdst); 10500 __ mov(BitsPerLong, Rtmp); 10501 __ sub(Rtmp, Rdst, Rdst); 10502 %} 10503 ins_pipe(ialu_reg); 10504 %} 10505 10506 instruct countTrailingZerosI(iRegI dst, iRegI src, flagsReg cr) %{ 10507 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10508 match(Set dst (CountTrailingZerosI src)); 10509 effect(TEMP dst, KILL cr); 10510 10511 // return popc(~x & (x - 1)); 10512 format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t" 10513 "ANDN $dst,$src,$dst\n\t" 10514 "SRL $dst,R_G0,$dst\n\t" 10515 "POPC $dst,$dst" %} 10516 ins_encode %{ 10517 Register Rdst = $dst$$Register; 10518 Register Rsrc = $src$$Register; 10519 __ sub(Rsrc, 1, Rdst); 10520 __ andn(Rdst, Rsrc, Rdst); 10521 __ srl(Rdst, G0, Rdst); 10522 __ popc(Rdst, Rdst); 10523 %} 10524 ins_pipe(ialu_reg); 10525 %} 10526 10527 instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{ 10528 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10529 match(Set dst (CountTrailingZerosL src)); 10530 effect(TEMP dst, KILL cr); 10531 10532 // return popc(~x & (x - 1)); 10533 format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t" 10534 "ANDN $dst,$src,$dst\n\t" 10535 "POPC $dst,$dst" %} 10536 ins_encode %{ 10537 Register Rdst = $dst$$Register; 10538 Register Rsrc = $src$$Register; 10539 __ sub(Rsrc, 1, Rdst); 10540 __ andn(Rdst, Rsrc, Rdst); 10541 __ popc(Rdst, Rdst); 10542 %} 10543 ins_pipe(ialu_reg); 10544 %} 10545 10546 10547 //---------- Population Count Instructions ------------------------------------- 10548 10549 instruct popCountI(iRegI dst, iRegI src) %{ 10550 predicate(UsePopCountInstruction); 10551 match(Set dst (PopCountI src)); 10552 10553 format %{ "POPC $src, $dst" %} 10554 ins_encode %{ 10555 __ popc($src$$Register, $dst$$Register); 10556 %} 10557 ins_pipe(ialu_reg); 10558 %} 10559 10560 // Note: Long.bitCount(long) returns an int. 10561 instruct popCountL(iRegI dst, iRegL src) %{ 10562 predicate(UsePopCountInstruction); 10563 match(Set dst (PopCountL src)); 10564 10565 format %{ "POPC $src, $dst" %} 10566 ins_encode %{ 10567 __ popc($src$$Register, $dst$$Register); 10568 %} 10569 ins_pipe(ialu_reg); 10570 %} 10571 10572 10573 // ============================================================================ 10574 //------------Bytes reverse-------------------------------------------------- 10575 10576 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{ 10577 match(Set dst (ReverseBytesI src)); 10578 10579 // Op cost is artificially doubled to make sure that load or store 10580 // instructions are preferred over this one which requires a spill 10581 // onto a stack slot. 10582 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10583 format %{ "LDUWA $src, $dst\t!asi=primary_little" %} 10584 10585 ins_encode %{ 10586 __ set($src$$disp + STACK_BIAS, O7); 10587 __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10588 %} 10589 ins_pipe( iload_mem ); 10590 %} 10591 10592 instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{ 10593 match(Set dst (ReverseBytesL src)); 10594 10595 // Op cost is artificially doubled to make sure that load or store 10596 // instructions are preferred over this one which requires a spill 10597 // onto a stack slot. 10598 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10599 format %{ "LDXA $src, $dst\t!asi=primary_little" %} 10600 10601 ins_encode %{ 10602 __ set($src$$disp + STACK_BIAS, O7); 10603 __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10604 %} 10605 ins_pipe( iload_mem ); 10606 %} 10607 10608 instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{ 10609 match(Set dst (ReverseBytesUS src)); 10610 10611 // Op cost is artificially doubled to make sure that load or store 10612 // instructions are preferred over this one which requires a spill 10613 // onto a stack slot. 10614 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10615 format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %} 10616 10617 ins_encode %{ 10618 // the value was spilled as an int so bias the load 10619 __ set($src$$disp + STACK_BIAS + 2, O7); 10620 __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10621 %} 10622 ins_pipe( iload_mem ); 10623 %} 10624 10625 instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{ 10626 match(Set dst (ReverseBytesS src)); 10627 10628 // Op cost is artificially doubled to make sure that load or store 10629 // instructions are preferred over this one which requires a spill 10630 // onto a stack slot. 10631 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10632 format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %} 10633 10634 ins_encode %{ 10635 // the value was spilled as an int so bias the load 10636 __ set($src$$disp + STACK_BIAS + 2, O7); 10637 __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10638 %} 10639 ins_pipe( iload_mem ); 10640 %} 10641 10642 // Load Integer reversed byte order 10643 instruct loadI_reversed(iRegI dst, indIndexMemory src) %{ 10644 match(Set dst (ReverseBytesI (LoadI src))); 10645 10646 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 10647 size(4); 10648 format %{ "LDUWA $src, $dst\t!asi=primary_little" %} 10649 10650 ins_encode %{ 10651 __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10652 %} 10653 ins_pipe(iload_mem); 10654 %} 10655 10656 // Load Long - aligned and reversed 10657 instruct loadL_reversed(iRegL dst, indIndexMemory src) %{ 10658 match(Set dst (ReverseBytesL (LoadL src))); 10659 10660 ins_cost(MEMORY_REF_COST); 10661 size(4); 10662 format %{ "LDXA $src, $dst\t!asi=primary_little" %} 10663 10664 ins_encode %{ 10665 __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10666 %} 10667 ins_pipe(iload_mem); 10668 %} 10669 10670 // Load unsigned short / char reversed byte order 10671 instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{ 10672 match(Set dst (ReverseBytesUS (LoadUS src))); 10673 10674 ins_cost(MEMORY_REF_COST); 10675 size(4); 10676 format %{ "LDUHA $src, $dst\t!asi=primary_little" %} 10677 10678 ins_encode %{ 10679 __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10680 %} 10681 ins_pipe(iload_mem); 10682 %} 10683 10684 // Load short reversed byte order 10685 instruct loadS_reversed(iRegI dst, indIndexMemory src) %{ 10686 match(Set dst (ReverseBytesS (LoadS src))); 10687 10688 ins_cost(MEMORY_REF_COST); 10689 size(4); 10690 format %{ "LDSHA $src, $dst\t!asi=primary_little" %} 10691 10692 ins_encode %{ 10693 __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10694 %} 10695 ins_pipe(iload_mem); 10696 %} 10697 10698 // Store Integer reversed byte order 10699 instruct storeI_reversed(indIndexMemory dst, iRegI src) %{ 10700 match(Set dst (StoreI dst (ReverseBytesI src))); 10701 10702 ins_cost(MEMORY_REF_COST); 10703 size(4); 10704 format %{ "STWA $src, $dst\t!asi=primary_little" %} 10705 10706 ins_encode %{ 10707 __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10708 %} 10709 ins_pipe(istore_mem_reg); 10710 %} 10711 10712 // Store Long reversed byte order 10713 instruct storeL_reversed(indIndexMemory dst, iRegL src) %{ 10714 match(Set dst (StoreL dst (ReverseBytesL src))); 10715 10716 ins_cost(MEMORY_REF_COST); 10717 size(4); 10718 format %{ "STXA $src, $dst\t!asi=primary_little" %} 10719 10720 ins_encode %{ 10721 __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10722 %} 10723 ins_pipe(istore_mem_reg); 10724 %} 10725 10726 // Store unsighed short/char reversed byte order 10727 instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{ 10728 match(Set dst (StoreC dst (ReverseBytesUS src))); 10729 10730 ins_cost(MEMORY_REF_COST); 10731 size(4); 10732 format %{ "STHA $src, $dst\t!asi=primary_little" %} 10733 10734 ins_encode %{ 10735 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10736 %} 10737 ins_pipe(istore_mem_reg); 10738 %} 10739 10740 // Store short reversed byte order 10741 instruct storeS_reversed(indIndexMemory dst, iRegI src) %{ 10742 match(Set dst (StoreC dst (ReverseBytesS src))); 10743 10744 ins_cost(MEMORY_REF_COST); 10745 size(4); 10746 format %{ "STHA $src, $dst\t!asi=primary_little" %} 10747 10748 ins_encode %{ 10749 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10750 %} 10751 ins_pipe(istore_mem_reg); 10752 %} 10753 10754 //----------PEEPHOLE RULES----------------------------------------------------- 10755 // These must follow all instruction definitions as they use the names 10756 // defined in the instructions definitions. 10757 // 10758 // peepmatch ( root_instr_name [preceding_instruction]* ); 10759 // 10760 // peepconstraint %{ 10761 // (instruction_number.operand_name relational_op instruction_number.operand_name 10762 // [, ...] ); 10763 // // instruction numbers are zero-based using left to right order in peepmatch 10764 // 10765 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) ); 10766 // // provide an instruction_number.operand_name for each operand that appears 10767 // // in the replacement instruction's match rule 10768 // 10769 // ---------VM FLAGS--------------------------------------------------------- 10770 // 10771 // All peephole optimizations can be turned off using -XX:-OptoPeephole 10772 // 10773 // Each peephole rule is given an identifying number starting with zero and 10774 // increasing by one in the order seen by the parser. An individual peephole 10775 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# 10776 // on the command-line. 10777 // 10778 // ---------CURRENT LIMITATIONS---------------------------------------------- 10779 // 10780 // Only match adjacent instructions in same basic block 10781 // Only equality constraints 10782 // Only constraints between operands, not (0.dest_reg == EAX_enc) 10783 // Only one replacement instruction 10784 // 10785 // ---------EXAMPLE---------------------------------------------------------- 10786 // 10787 // // pertinent parts of existing instructions in architecture description 10788 // instruct movI(eRegI dst, eRegI src) %{ 10789 // match(Set dst (CopyI src)); 10790 // %} 10791 // 10792 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{ 10793 // match(Set dst (AddI dst src)); 10794 // effect(KILL cr); 10795 // %} 10796 // 10797 // // Change (inc mov) to lea 10798 // peephole %{ 10799 // // increment preceeded by register-register move 10800 // peepmatch ( incI_eReg movI ); 10801 // // require that the destination register of the increment 10802 // // match the destination register of the move 10803 // peepconstraint ( 0.dst == 1.dst ); 10804 // // construct a replacement instruction that sets 10805 // // the destination to ( move's source register + one ) 10806 // peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) ); 10807 // %} 10808 // 10809 10810 // // Change load of spilled value to only a spill 10811 // instruct storeI(memory mem, eRegI src) %{ 10812 // match(Set mem (StoreI mem src)); 10813 // %} 10814 // 10815 // instruct loadI(eRegI dst, memory mem) %{ 10816 // match(Set dst (LoadI mem)); 10817 // %} 10818 // 10819 // peephole %{ 10820 // peepmatch ( loadI storeI ); 10821 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); 10822 // peepreplace ( storeI( 1.mem 1.mem 1.src ) ); 10823 // %} 10824 10825 //----------SMARTSPILL RULES--------------------------------------------------- 10826 // These must follow all instruction definitions as they use the names 10827 // defined in the instructions definitions. 10828 // 10829 // SPARC will probably not have any of these rules due to RISC instruction set. 10830 10831 //----------PIPELINE----------------------------------------------------------- 10832 // Rules which define the behavior of the target architectures pipeline.