//
// Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// SPARC Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.


// ----------------------------
// Integer/Long Registers
// ----------------------------

// Need to expose the hi/lo aspect of 64-bit registers
// This register set is used for both the 64-bit build and
// the 32-bit build with 1-register longs.
// Global Registers 0-7
// Each 64-bit integer register is described as two 32-bit halves: the low
// half keeps the architectural encoding (0-31) and the "H" high half is
// given a synthetic encoding of 128 + <low encoding>.
reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next());
reg_def R_G0 ( NS, NS, Op_RegI,  0, G0->as_VMReg());
reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
reg_def R_G1 (SOC, SOC, Op_RegI,  1, G1->as_VMReg());
reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next());
reg_def R_G2 ( NS, NS, Op_RegI,  2, G2->as_VMReg());
reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
reg_def R_G3 (SOC, SOC, Op_RegI,  3, G3->as_VMReg());
reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
reg_def R_G4 (SOC, SOC, Op_RegI,  4, G4->as_VMReg());
reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
reg_def R_G5 (SOC, SOC, Op_RegI,  5, G5->as_VMReg());
reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next());
reg_def R_G6 ( NS, NS, Op_RegI,  6, G6->as_VMReg());
reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next());
reg_def R_G7 ( NS, NS, Op_RegI,  7, G7->as_VMReg());

// Output Registers 0-7
// Note that the O6 slot (encoding 14) is the stack pointer, named R_SP here.
reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
reg_def R_O0 (SOC, SOC, Op_RegI,  8, O0->as_VMReg());
reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
reg_def R_O1 (SOC, SOC, Op_RegI,  9, O1->as_VMReg());
reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next());
reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg());
reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());
// Local Registers 0-7
// All locals are NS: the SPARC register window save/restore preserves them.
reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next());
reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg());
reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next());
reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg());
reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next());
reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg());
reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next());
reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg());
reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next());
reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg());
reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next());
reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg());
reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next());
reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg());
reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next());
reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg());

// Input Registers 0-7
// Note that the I6 slot (encoding 30) is the frame pointer, named R_FP here.
reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next());
reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg());
reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next());
reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg());
reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next());
reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg());
reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next());
reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg());
reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next());
reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg());
reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next());
reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg());
reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next());
reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next());
reg_def R_I7 ( NS, NS, Op_RegI, 31, I7->as_VMReg());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Float Registers
// All single-precision float registers are caller-save (SOC).
reg_def R_F0 ( SOC, SOC, Op_RegF,  0, F0->as_VMReg());
reg_def R_F1 ( SOC, SOC, Op_RegF,  1, F1->as_VMReg());
reg_def R_F2 ( SOC, SOC, Op_RegF,  2, F2->as_VMReg());
reg_def R_F3 ( SOC, SOC, Op_RegF,  3, F3->as_VMReg());
reg_def R_F4 ( SOC, SOC, Op_RegF,  4, F4->as_VMReg());
reg_def R_F5 ( SOC, SOC, Op_RegF,  5, F5->as_VMReg());
reg_def R_F6 ( SOC, SOC, Op_RegF,  6, F6->as_VMReg());
reg_def R_F7 ( SOC, SOC, Op_RegF,  7, F7->as_VMReg());
reg_def R_F8 ( SOC, SOC, Op_RegF,  8, F8->as_VMReg());
reg_def R_F9 ( SOC, SOC, Op_RegF,  9, F9->as_VMReg());
reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());

// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers.  In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even.  Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// These definitions specify the actual bit encodings of the sparc
// double fp register numbers.  FloatRegisterImpl in register_sparc.hpp
// wants 0-63, so we have to convert every time we want to use fp regs
// with the macroassembler, using reg_to_DoubleFloatRegister_object().
// 255 is a flag meaning "don't go here".
// I believe we can't handle callee-save doubles D32 and up until
// the place in the sparc stack crawler that asserts on the 255 is
// fixed up.
reg_def R_D32 (SOC, SOC, Op_RegD,  1, F32->as_VMReg());
reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
reg_def R_D34 (SOC, SOC, Op_RegD,  3, F34->as_VMReg());
reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
reg_def R_D36 (SOC, SOC, Op_RegD,  5, F36->as_VMReg());
reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
reg_def R_D38 (SOC, SOC, Op_RegD,  7, F38->as_VMReg());
reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
reg_def R_D40 (SOC, SOC, Op_RegD,  9, F40->as_VMReg());
reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());


// ----------------------------
// Special Registers
// Condition Codes Flag Registers
// I tried to break out ICC and XCC but it's not very pretty.
// Every Sparc instruction which defs/kills one also kills the other.
// Hence every compare instruction which defs one kind of flags ends
// up needing a kill of the other.
reg_def CCR (SOC, SOC,  Op_RegFlags, 0, VMRegImpl::Bad());

reg_def FCC0(SOC, SOC,  Op_RegFlags, 0, VMRegImpl::Bad());
reg_def FCC1(SOC, SOC,  Op_RegFlags, 1, VMRegImpl::Bad());
reg_def FCC2(SOC, SOC,  Op_RegFlags, 2, VMRegImpl::Bad());
reg_def FCC3(SOC, SOC,  Op_RegFlags, 3, VMRegImpl::Bad());

// ----------------------------
// Specify the enum values for the registers.  These enums are only used by the
// OptoReg "class". We can convert these enum values at will to VMReg when needed
// for visibility to the rest of the vm. The order of this enum influences the
// register allocator so having the freedom to set this order and not be stuck
// with the order that is natural for the rest of the vm is worth it.
alloc_class chunk0(
    R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H,
    R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H,
    R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H,
    R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H);

// Note that a register is not allocatable unless it is also mentioned
// in a widely-used reg_class below.  Thus, R_G7 and R_G0 are outside i_reg.

alloc_class chunk1(
    // The first registers listed here are those most likely to be used
    // as temporaries.  We move F0..F7 away from the front of the list,
    // to reduce the likelihood of interferences with parameters and
    // return values.  Likewise, we avoid using F0/F1 for parameters,
    // since they are used for return values.
    // This FPU fine-tuning is worth about 1% on the SPEC geomean.
    R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
    R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,
    R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31,
    R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values
    R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,
    R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
    R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,
    R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);

alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( as defined in frame section )
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// G0 is not included in integer class since it has special meaning.
reg_class g0_reg(R_G0);

// ----------------------------
// Integer Register Classes
// ----------------------------
// Exclusions from i_reg:
// R_G0: hardwired zero
// R_G2: reserved by HotSpot to the TLS register (invariant within Java)
// R_G6: reserved by Solaris ABI to tools
// R_G7: reserved by Solaris ABI to libthread
// R_O7: Used as a temp in many encodings
reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

// Class for all integer registers, except the G registers.  This is used for
// encodings which use G registers as temps.  The regular inputs to such
// instructions use a "notemp_" prefix, as a hack to ensure that the allocator
// will not put an input into a temp register.
reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

// Single-register classes used when an encoding needs a specific register.
reg_class g1_regI(R_G1);
reg_class g3_regI(R_G3);
reg_class g4_regI(R_G4);
reg_class o0_regI(R_O0);
reg_class o7_regI(R_O7);

// ----------------------------
// Pointer Register Classes
// ----------------------------
// 64-bit build means 64-bit pointers means hi/lo pairs
reg_class ptr_reg(            R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(       R_G1H,R_G1,                         R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg(         R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7H,R_L7);

// Other special pointer regs
reg_class g1_regP(R_G1H,R_G1);
reg_class g2_regP(R_G2H,R_G2);
reg_class g3_regP(R_G3H,R_G3);
reg_class g4_regP(R_G4H,R_G4);
reg_class g5_regP(R_G5H,R_G5);
reg_class i0_regP(R_I0H,R_I0);
reg_class o0_regP(R_O0H,R_O0);
reg_class o1_regP(R_O1H,R_O1);
reg_class o2_regP(R_O2H,R_O2);
reg_class o7_regP(R_O7H,R_O7);


// ----------------------------
// Long Register Classes
// ----------------------------
// Longs in 1 register.  Aligned adjacent hi/lo pairs.
// Note:  O7 is never in this class; it is sometimes used as an encoding temp.
reg_class long_reg(           R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
                  ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
// 64-bit, longs in 1 register: use all 64-bit integer registers
                  ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
                  ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
                  );

// Single-register long classes for encodings needing a specific register.
reg_class g1_regL(R_G1H,R_G1);
reg_class g3_regL(R_G3H,R_G3);
reg_class o2_regL(R_O2H,R_O2);
reg_class o7_regL(R_O7H,R_O7);

// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(CCR);
reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
reg_class float_flag0(FCC0);


// ----------------------------
// Float Point Register Classes
// ----------------------------
// Skip F30/F31, they are reserved for mem-mem copies
reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,
                   /* Use extra V9 double registers; this AD file does not support V8 */
                   R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
                   R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
// This class is usable for mis-aligned loads as happen in I2C adapters.
reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                       R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
%}

//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
definitions %{
// The default cost (of an ALU instruction).
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

// Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

// Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  int_def CALL_COST         (    300, DEFAULT_COST * 3);
%}


//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.

// Must be visible to the DFA in dfa_sparc.cpp
extern bool can_branch_register( Node *bol, Node *cmp );

extern bool use_block_zeroing(Node* count);

// Macros to extract hi & lo halves from a long pair.
// G0 is not part of any long pair, so assert on that.
// Prevents accidentally using G1 instead of G0.
// NOTE(review): the comment above mentions an assert, but both macros are
// plain identity expansions here -- confirm whether the assert was
// intentionally dropped (e.g. when 32-bit support was removed).
#define LONG_HI_REG(x) (x)
#define LONG_LO_REG(x) (x)

class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::Shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

class HandlerImpl {

 public:

  // Emission routines are defined in the source block below / in the .ad's
  // generated code; only the size accessors are inline here.
  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    return ( NativeJump::instruction_size ); // sethi;jmp;nop
  }

  static uint size_deopt_handler() {
    return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
  }
};

%}

source %{
#define __ _masm.

// tertiary op of a LoadP or StoreP encoding
#define REGP_OP true

static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
static Register reg_to_register_object(int register_encoding);

// Used by the DFA in dfa_sparc.cpp.
// Check for being able to use a V9 branch-on-register.  Requires a
// compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
// extended.  Doesn't work following an integer ADD, for example, because of
// overflow (-1 incremented yields 0 plus a carry in the high-order word).  On
// 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
// replace them with zero, which could become sign-extension in a different OS
// release.  There's no obvious reason why an interrupt will ever fill these
// bits with non-zero junk (the registers are reloaded with standard LD
// instructions which either zero-fill or sign-fill).
496 bool can_branch_register( Node *bol, Node *cmp ) { 497 if( !BranchOnRegister ) return false; 498 if( cmp->Opcode() == Op_CmpP ) 499 return true; // No problems with pointer compares 500 if( cmp->Opcode() == Op_CmpL ) 501 return true; // No problems with long compares 502 503 if( !SparcV9RegsHiBitsZero ) return false; 504 if( bol->as_Bool()->_test._test != BoolTest::ne && 505 bol->as_Bool()->_test._test != BoolTest::eq ) 506 return false; 507 508 // Check for comparing against a 'safe' value. Any operation which 509 // clears out the high word is safe. Thus, loads and certain shifts 510 // are safe, as are non-negative constants. Any operation which 511 // preserves zero bits in the high word is safe as long as each of its 512 // inputs are safe. Thus, phis and bitwise booleans are safe if their 513 // inputs are safe. At present, the only important case to recognize 514 // seems to be loads. Constants should fold away, and shifts & 515 // logicals can use the 'cc' forms. 516 Node *x = cmp->in(1); 517 if( x->is_Load() ) return true; 518 if( x->is_Phi() ) { 519 for( uint i = 1; i < x->req(); i++ ) 520 if( !x->in(i)->is_Load() ) 521 return false; 522 return true; 523 } 524 return false; 525 } 526 527 bool use_block_zeroing(Node* count) { 528 // Use BIS for zeroing if count is not constant 529 // or it is >= BlockZeroingLowLimit. 530 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit); 531 } 532 533 // **************************************************************************** 534 535 // REQUIRED FUNCTIONALITY 536 537 // !!!!! Special hack to get all type of calls to specify the byte offset 538 // from the start of the call to the point where the return address 539 // will point. 540 // The "return address" is the address of the call instruction, plus 8. 
// Byte offset from the start of a static Java call to the point the return
// address will reference (call + delay slot, plus an SP-restore instruction
// for method-handle invokes).
int MachCallStaticJavaNode::ret_addr_offset() {
  int offset = NativeCall::instruction_size;  // call; delay slot
  if (_method_handle_invoke)
    offset += 4;  // restore SP
  return offset;
}

// Byte offset from the start of a dynamic (inline-cache or vtable) Java call
// to the point the return address will reference.  The size depends on how
// the receiver's klass and the vtable entry are loaded.
int MachCallDynamicJavaNode::ret_addr_offset() {
  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    // Inline-cache dispatch: a constant load of the IC oop precedes the call.
    return (NativeMovConstReg::instruction_size +
           NativeCall::instruction_size);  // sethi; setlo; call; delay slot
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    // Vtable dispatch: compute the byte offset of the target Method* within
    // the receiver's klass, then size the klass load + vtable loads + call.
    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
    int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
    int klass_load_size;
    if (UseCompressedClassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      // Narrow klass load plus the decode sequence.
      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    if (Assembler::is_simm13(v_off)) {
      // v_off fits in a 13-bit immediate: two loads suffice.
      return klass_load_size +
             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
              NativeCall::instruction_size); // call; delay slot
    } else {
      // v_off needs to be materialized in a register first.
      return klass_load_size +
             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
              NativeCall::instruction_size); // call; delay slot
    }
  }
}

// Byte offset for runtime calls: far targets need the larger far-call stub.
int MachCallRuntimeNode::ret_addr_offset() {
  if (MacroAssembler::is_far_target(entry_point())) {
    return NativeFarCall::instruction_size;
  } else {
    return NativeCall::instruction_size;
  }
}

// Indicate if the safepoint node needs the polling page as an input.
// Since Sparc does not have absolute addressing, it does.
589 bool SafePointNode::needs_polling_address_input() { 590 return true; 591 } 592 593 // emit an interrupt that is caught by the debugger (for debugging compiler) 594 void emit_break(CodeBuffer &cbuf) { 595 MacroAssembler _masm(&cbuf); 596 __ breakpoint_trap(); 597 } 598 599 #ifndef PRODUCT 600 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const { 601 st->print("TA"); 602 } 603 #endif 604 605 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 606 emit_break(cbuf); 607 } 608 609 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const { 610 return MachNode::size(ra_); 611 } 612 613 // Traceable jump 614 void emit_jmpl(CodeBuffer &cbuf, int jump_target) { 615 MacroAssembler _masm(&cbuf); 616 Register rdest = reg_to_register_object(jump_target); 617 __ JMP(rdest, 0); 618 __ delayed()->nop(); 619 } 620 621 // Traceable jump and set exception pc 622 void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) { 623 MacroAssembler _masm(&cbuf); 624 Register rdest = reg_to_register_object(jump_target); 625 __ JMP(rdest, 0); 626 __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc ); 627 } 628 629 void emit_nop(CodeBuffer &cbuf) { 630 MacroAssembler _masm(&cbuf); 631 __ nop(); 632 } 633 634 void emit_illtrap(CodeBuffer &cbuf) { 635 MacroAssembler _masm(&cbuf); 636 __ illtrap(0); 637 } 638 639 640 intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) { 641 assert(n->rule() != loadUB_rule, ""); 642 643 intptr_t offset = 0; 644 const TypePtr *adr_type = TYPE_PTR_SENTINAL; // Check for base==RegI, disp==immP 645 const Node* addr = n->get_base_and_disp(offset, adr_type); 646 assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP"); 647 assert(addr != NULL && addr != (Node*)-1, "invalid addr"); 648 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 649 atype = atype->add_offset(offset); 650 assert(disp32 == offset, "wrong disp32"); 
651 return atype->_offset; 652 } 653 654 655 intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) { 656 assert(n->rule() != loadUB_rule, ""); 657 658 intptr_t offset = 0; 659 Node* addr = n->in(2); 660 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 661 if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) { 662 Node* a = addr->in(2/*AddPNode::Address*/); 663 Node* o = addr->in(3/*AddPNode::Offset*/); 664 offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot; 665 atype = a->bottom_type()->is_ptr()->add_offset(offset); 666 assert(atype->isa_oop_ptr(), "still an oop"); 667 } 668 offset = atype->is_ptr()->_offset; 669 if (offset != Type::OffsetBot) offset += disp32; 670 return offset; 671 } 672 673 static inline jlong replicate_immI(int con, int count, int width) { 674 // Load a constant replicated "count" times with width "width" 675 assert(count*width == 8 && width <= 4, "sanity"); 676 int bit_width = width * 8; 677 jlong val = con; 678 val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits 679 for (int i = 0; i < count - 1; i++) { 680 val |= (val << bit_width); 681 } 682 return val; 683 } 684 685 static inline jlong replicate_immF(float con) { 686 // Replicate float con 2 times and pack into vector. 
687 int val = *((int*)&con); 688 jlong lval = val; 689 lval = (lval << 32) | (lval & 0xFFFFFFFFl); 690 return lval; 691 } 692 693 // Standard Sparc opcode form2 field breakdown 694 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) { 695 f0 &= (1<<19)-1; // Mask displacement to 19 bits 696 int op = (f30 << 30) | 697 (f29 << 29) | 698 (f25 << 25) | 699 (f22 << 22) | 700 (f20 << 20) | 701 (f19 << 19) | 702 (f0 << 0); 703 cbuf.insts()->emit_int32(op); 704 } 705 706 // Standard Sparc opcode form2 field breakdown 707 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) { 708 f0 >>= 10; // Drop 10 bits 709 f0 &= (1<<22)-1; // Mask displacement to 22 bits 710 int op = (f30 << 30) | 711 (f25 << 25) | 712 (f22 << 22) | 713 (f0 << 0); 714 cbuf.insts()->emit_int32(op); 715 } 716 717 // Standard Sparc opcode form3 field breakdown 718 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) { 719 int op = (f30 << 30) | 720 (f25 << 25) | 721 (f19 << 19) | 722 (f14 << 14) | 723 (f5 << 5) | 724 (f0 << 0); 725 cbuf.insts()->emit_int32(op); 726 } 727 728 // Standard Sparc opcode form3 field breakdown 729 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) { 730 simm13 &= (1<<13)-1; // Mask to 13 bits 731 int op = (f30 << 30) | 732 (f25 << 25) | 733 (f19 << 19) | 734 (f14 << 14) | 735 (1 << 13) | // bit to indicate immediate-mode 736 (simm13<<0); 737 cbuf.insts()->emit_int32(op); 738 } 739 740 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) { 741 simm10 &= (1<<10)-1; // Mask to 10 bits 742 emit3_simm13(cbuf,f30,f25,f19,f14,simm10); 743 } 744 745 #ifdef ASSERT 746 // Helper function for VerifyOops in emit_form3_mem_reg 747 void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) { 748 warning("VerifyOops encountered unexpected instruction:"); 749 
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif


// Emit a SPARC form-3 memory instruction (load or store).
//   primary  - the op3 opcode field selecting the concrete load/store
//   tertiary - 0xFFFFFFFF, or REGP_OP to mark a pointer-sized access
//   src1_enc - base register encoding
//   disp32   - displacement added to the base
//   src2_enc - index register encoding (R_G0_enc means "no index")
//   dst_enc  - data (destination/source-value) register encoding
// Chooses reg+reg or reg+simm13 form; offsets from SP/FP get STACK_BIAS
// added and are materialized in O7 when they do not fit in simm13.
void emit_form3_mem_reg(CodeBuffer &cbuf, PhaseRegAlloc* ra, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used together with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      // Pointer-sized access: upgrade the classification to StoreP/LoadP
      // and record whether the value moved is itself a verified oop.
      if (st_op == Op_StoreI)     st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else                        ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      // Warn unless the node's ideal opcode matches the emitted load, or the
      // pair is one of the known intentional mismatches listed below.
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr();  // %%% oopptr?
        if (atype != NULL) {
          // Compute the offset two independent ways as a cross-check;
          // the duplicate calls under the mismatch test exist so a
          // debugger can step into them when the assert is about to fire.
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                         || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Assemble the form-3 instruction word: op=11 (ldst), rd, op3, rs1.
  uint instr = (Assembler::ldst_op << 30)
             | (dst_enc  << 25)
             | (primary  << 19)
             | (src1_enc << 14);

  uint index = src2_enc;
  int disp = disp32;

  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) {
    disp += STACK_BIAS;
    // Check that stack offset fits, load into O7 if not
    if (!Assembler::is_simm13(disp)) {
      MacroAssembler _masm(&cbuf);
      __ set(disp, O7);
      if (index != R_G0_enc) {
        __ add(O7, reg_to_register_object(index), O7);
      }
      index = R_O7_enc;
      disp = 0;
    }
  }

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  if (VerifyOops) {
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      // The load was redirected into O7 above; move the result to the
      // originally requested destination register.
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}

// Emit a relocated call to 'entry_point'.  When preserve_g2 is set, G2 is
// parked in L7 in the delay slot and restored after the call returns.
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, RelocationHolder const& rspec, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rspec);

  // Delay slot: save G2 into L7 if requested, otherwise just a nop.
  if (preserve_g2) __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  // After the call returns, recover G2 from L7.
  if (preserve_g2) __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
  }
#endif /*ASSERT*/
}

//=============================================================================
// REQUIRED FUNCTIONALITY for encoding
// Intentionally empty on this platform.
void emit_lo(CodeBuffer &cbuf, int val) { }
void emit_hi(CodeBuffer &cbuf, int val) { }


//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();

// Choose the offset of the constant-table base relative to the table start;
// returned value is negative (it is later added to load displacements).
int Compile::ConstantTable::calculate_table_base_offset() const {
  if (UseRDPCForConstantTableBase) {
    // The table base offset might be less but then it fits into
    // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
    return Assembler::min_simm13();
  } else {
    // Aim the base at the middle of the table so both halves are reachable.
    int offset = -(size() / 2);
    if (!Assembler::is_simm13(offset)) {
      offset = Assembler::min_simm13();
    }
    return offset;
  }
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// Materialize the constant-table base address into this node's assigned
// register, either via RDPC (+ optional SUB) or via a relocated SET.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section.  This
    // assert checks for that.  The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                         \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp == 0) {
      // Emitting an additional 'nop' instruction in order not to cause a code
      // size adjustment in the code following the table setup (if the instruction
      // immediately following after this section is a CTI).
      __ nop();
    }
    else {
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}

// Worst-case size in bytes of the code MachConstantBaseNode::emit produces.
uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
  if (UseRDPCForConstantTableBase) {
    // This is really the worst case but generally it's only 1 instruction.
    return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
  } else {
    return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
  }
}

#ifndef PRODUCT
// Pretty-print the constant-table-base setup for -XX:+PrintOptoAssembly.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif


//=============================================================================

#ifndef PRODUCT
// Pretty-print the method prologue; mirrors MachPrologNode::emit below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  size_t framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print ("SAVE R_SP,-" SIZE_FORMAT ",R_SP",framesize);
  } else {
    st->print_cr("SETHI R_SP,hi%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t");
    st->print_cr("ADD R_G3,lo%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t");
    st->print ("SAVE R_SP,R_G3,R_SP");
  }

}
#endif

// Emit the method prologue: optional nops, thread verification, stack-bang
// check, and the register-window SAVE that allocates the frame.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_size_in_bytes();
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
  int bangsize = C->bang_size_in_bytes();

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    // Frame too large for an immediate SAVE: build -framesize in G3 first.
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachPrologNode::reloc() const {
  return 10; // a large enough number
}

//=============================================================================
#ifndef PRODUCT
// Pretty-print the method epilogue; mirrors MachEpilogNode::emit below.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if(do_polling() && ra_->C->is_method_compilation()) {
    if (SafepointMechanism::uses_global_page_poll()) {
      st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
    } else {
      st->print("LDX [R_G2 + #poll_offset],L0\t! Load local polling address\n\t");
    }
    st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
  }

  if(do_polling()) {
    if (UseCBCond && !ra_->C->is_method_compilation()) {
      st->print("NOP\n\t");
    }
    st->print("RET\n\t");
  }

  st->print("RESTORE");
}
#endif

// Emit the method epilogue: optional safepoint return poll, then RET with
// RESTORE in the delay slot (or a bare RESTORE when not returning).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // If this does safepoint polling, then do it here
  if(do_polling() && ra_->C->is_method_compilation()) {
    if (SafepointMechanism::uses_thread_local_poll()) {
      __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
    } else {
      AddressLiteral polling_page(os::get_polling_page());
      __ sethi(polling_page, L0);
    }
    __ relocate(relocInfo::poll_return_type);
    __ ld_ptr(L0, 0, G0);
  }

  // If this is a return, then stuff the restore in the delay slot
  if(do_polling()) {
    if (UseCBCond && !ra_->C->is_method_compilation()) {
      // Insert extra padding for the case when the epilogue is preceded by
      // a cbcond jump, which can't be followed by a CTI instruction
      __ nop();
    }
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Byte offset of the poll instruction within the epilogue (global-page
// polling only): the SETHI of the polling page comes first.
int MachEpilogNode::safepoint_offset() const {
  assert(SafepointMechanism::uses_global_page_poll(), "sanity");
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}

//=============================================================================

// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };
static enum RC rc_class( OptoReg::Name reg ) {
  if (!OptoReg::is_valid(reg)) return rc_bad;
  if (OptoReg::is_stack(reg)) return rc_stack;
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}

#ifndef PRODUCT
// printf-style append to 'st', starting a fresh indented line when the
// stream already has content.
ATTRIBUTE_PRINTF(2, 3)
static void print_helper(outputStream* st, const char* format, ...) {
  if (st->position() > 0) {
    st->cr();
    st->sp();
  }
  va_list ap;
  va_start(ap, format);
  st->vprint(format, ap);
  va_end(ap);
}
#endif // !PRODUCT

// Emit (cbuf != NULL) or pretty-print (cbuf == NULL) one spill load/store
// between register 'reg' and [R_SP + offset].
static void impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool is_load, int offset, int reg, int opcode, const char *op_str, outputStream* st) {
  if (cbuf) {
    emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
  }
#ifndef PRODUCT
  else {
    if (is_load) {
      print_helper(st, "%s [R_SP + #%d],R_%s\t! spill", op_str, offset, OptoReg::regname(reg));
    } else {
      print_helper(st, "%s R_%s,[R_SP + #%d]\t! spill", op_str, OptoReg::regname(reg), offset);
    }
  }
#endif
}

// Emit (cbuf != NULL) or pretty-print (cbuf == NULL) one reg-to-reg spill move.
static void impl_mov_helper(CodeBuffer *cbuf, int src, int dst, int op1, int op2, const char *op_str, outputStream* st) {
  if (cbuf) {
    emit3(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src]);
  }
#ifndef PRODUCT
  else {
    print_helper(st, "%s R_%s,R_%s\t! spill", op_str, OptoReg::regname(src), OptoReg::regname(dst));
  }
#endif
}

// Core of MachSpillCopyNode: move a value (one or two machine registers
// wide) between int registers, float registers, and stack slots, emitting
// code when cbuf is non-NULL or formatting to 'st' otherwise.
static void mach_spill_copy_implementation_helper(const MachNode* mach,
                                                  CodeBuffer *cbuf,
                                                  PhaseRegAlloc *ra_,
                                                  outputStream* st) {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(mach->in(1));
  OptoReg::Name src_first = ra_->get_reg_first(mach->in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(mach);
  OptoReg::Name dst_first = ra_->get_reg_first(mach);

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register");

  if (src_first == dst_first && src_second == dst_second) {
    return; // Self copy, no move
  }

  // --------------------------------------
  // Check for mem-mem move.  Load into unused float registers and fall into
  // the float-store case.
  if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
    int offset = ra_->reg2offset(src_first);
    // Further check for aligned-adjacent pair, so we can use a double load
    if ((src_first&1) == 0 && src_first+1 == src_second) {
      src_second = OptoReg::Name(R_F31_num);
      src_second_rc = rc_float;
      impl_helper(mach, cbuf, ra_, true, offset, R_F30_num, Assembler::lddf_op3, "LDDF", st);
    } else {
      impl_helper(mach, cbuf, ra_, true, offset, R_F30_num, Assembler::ldf_op3, "LDF ", st);
    }
    // Source now lives in F30(/F31); the float-store case below finishes.
    src_first = OptoReg::Name(R_F30_num);
    src_first_rc = rc_float;
  }

  if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
    int offset = ra_->reg2offset(src_second);
    impl_helper(mach, cbuf, ra_, true, offset, R_F31_num, Assembler::ldf_op3, "LDF ", st);
    src_second = OptoReg::Name(R_F31_num);
    src_second_rc = rc_float;
  }

  // --------------------------------------
  // Check for float->int copy; requires a trip through memory
  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
    int offset = frame::register_save_words*wordSize;
    if (cbuf) {
      // Carve 16 bytes of scratch stack, bounce through it, release it.
      emit3_simm13(*cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16);
      impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stf_op3, "STF ", st);
      impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
      emit3_simm13(*cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16);
    }
#ifndef PRODUCT
    else {
      print_helper(st, "SUB R_SP,16,R_SP");
      impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stf_op3, "STF ", st);
      impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
      print_helper(st, "ADD R_SP,16,R_SP");
    }
#endif
  }

  // Check for float->int copy on T4
  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
    // Further check for aligned-adjacent pair, so we can use a double move
    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
      impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mdtox_opf, "MOVDTOX", st);
      return;
    }
    impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mstouw_opf, "MOVSTOUW", st);
  }
  // Check for int->float copy on T4
  if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
    // Further check for aligned-adjacent pair, so we can use a double move
    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
      impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mxtod_opf, "MOVXTOD", st);
      return;
    }
    impl_mov_helper(cbuf, src_first, dst_first, Assembler::mftoi_op3, Assembler::mwtos_opf, "MOVWTOS", st);
  }

  // --------------------------------------
  // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
  // In such cases, I have to do the big-endian swap.  For aligned targets, the
  // hardware does the flop for me.  Doubles are always aligned, so no problem
  // there.  Misaligned sources only come from native-long-returns (handled
  // special below).

  // --------------------------------------
  // Check for integer reg-reg copy
  if (src_first_rc == rc_int && dst_first_rc == rc_int) {
    // Else normal reg-reg copy
    assert(src_second != dst_first, "smashed second before evacuating it");
    impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV ", st);
    assert((src_first & 1) == 0 && (dst_first & 1) == 0, "never move second-halves of int registers");
    // This moves an aligned adjacent pair.
    // See if we are done.
    if (src_first + 1 == src_second && dst_first + 1 == dst_second) {
      return;
    }
  }

  // Check for integer store
  if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
    int offset = ra_->reg2offset(dst_first);
    // Further check for aligned-adjacent pair, so we can use a double store
    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
      impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stx_op3, "STX ", st);
      return;
    }
    impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stw_op3, "STW ", st);
  }

  // Check for integer load
  if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
    int offset = ra_->reg2offset(src_first);
    // Further check for aligned-adjacent pair, so we can use a double load
    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
      impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldx_op3, "LDX ", st);
      return;
    }
    impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lduw_op3, "LDUW", st);
  }

  // Check for float reg-reg copy
  if (src_first_rc == rc_float && dst_first_rc == rc_float) {
    // Further check for aligned-adjacent pair, so we can use a double move
    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
      impl_mov_helper(cbuf, src_first, dst_first, Assembler::fpop1_op3, Assembler::fmovd_opf, "FMOVD", st);
      return;
    }
    impl_mov_helper(cbuf, src_first, dst_first, Assembler::fpop1_op3, Assembler::fmovs_opf, "FMOVS", st);
  }

  // Check for float store
  if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
    int offset = ra_->reg2offset(dst_first);
    // Further check for aligned-adjacent pair, so we can use a double store
    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
      impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stdf_op3, "STDF", st);
      return;
    }
    impl_helper(mach, cbuf, ra_, false, offset, src_first, Assembler::stf_op3, "STF ", st);
  }

  // Check for float load
  if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
    int offset = ra_->reg2offset(src_first);
    // Further check for aligned-adjacent pair, so we can use a double load
    if ((src_first & 1) == 0 && src_first + 1 == src_second && (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
      impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::lddf_op3, "LDDF", st);
      return;
    }
    impl_helper(mach, cbuf, ra_, true, offset, dst_first, Assembler::ldf_op3, "LDF ", st);
  }

  // --------------------------------------------------------------------
  // Check for hi bits still needing moving.  Only happens for misaligned
  // arguments to native calls.
  if (src_second == dst_second) {
    return; // Self copy; no move
  }
  assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");

  // Any remaining combination (misaligned second halves) is unsupported.
  Unimplemented();
}

uint MachSpillCopyNode::implementation(CodeBuffer *cbuf,
                                       PhaseRegAlloc *ra_,
                                       bool do_size,
                                       outputStream* st) const {
  assert(!do_size, "not supported");
  mach_spill_copy_implementation_helper(this, cbuf, ra_, st);
  return 0;
}

#ifndef PRODUCT
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

//=============================================================================
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc *, outputStream *st) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif

// Emit _count nop instructions (padding for loops and calls).
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
  MacroAssembler _masm(&cbuf);
  for (int i = 0; i < _count; i += 1) {
    __ nop();
  }
}

uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}


//=============================================================================
#ifndef PRODUCT
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
}
#endif

// Materialize the address of this node's stack slot (SP + biased offset)
// into its assigned register; O7 is used as scratch for large offsets.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm13(offset)) {
    __ add(SP, offset, reg_to_register_object(reg));
  } else {
    __ set(offset, O7);
    __ add(SP, O7, reg_to_register_object(reg));
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}

//=============================================================================
#ifndef PRODUCT
// Pretty-print the unverified entry point (inline cache check);
// mirrors MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
}
#endif

// Emit the unverified entry point: load the receiver's klass and trap if it
// does not match the inline-cache klass expected in G5.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}


//=============================================================================


// Emit exception handler code.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  Register temp_reg = G3;
  AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();

  __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->nop();

  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");

  __ end_a_stub();

  return offset;
}

// Emit deoptimization handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything (including G3) can be live.
1649 Register temp_reg = L0; 1650 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); 1651 MacroAssembler _masm(&cbuf); 1652 1653 address base = __ start_a_stub(size_deopt_handler()); 1654 if (base == NULL) { 1655 ciEnv::current()->record_failure("CodeCache is full"); 1656 return 0; // CodeBuffer::expand failed 1657 } 1658 1659 int offset = __ offset(); 1660 __ save_frame(0); 1661 __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp 1662 __ delayed()->restore(); 1663 1664 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); 1665 1666 __ end_a_stub(); 1667 return offset; 1668 1669 } 1670 1671 // Given a register encoding, produce a Integer Register object 1672 static Register reg_to_register_object(int register_encoding) { 1673 assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding"); 1674 return as_Register(register_encoding); 1675 } 1676 1677 // Given a register encoding, produce a single-precision Float Register object 1678 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) { 1679 assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding"); 1680 return as_SingleFloatRegister(register_encoding); 1681 } 1682 1683 // Given a register encoding, produce a double-precision Float Register object 1684 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) { 1685 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding"); 1686 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding"); 1687 return as_DoubleFloatRegister(register_encoding); 1688 } 1689 1690 const bool Matcher::match_rule_supported(int opcode) { 1691 if (!has_match_rule(opcode)) 1692 return false; 1693 1694 switch (opcode) { 1695 case Op_CountLeadingZerosI: 1696 case Op_CountLeadingZerosL: 1697 case Op_CountTrailingZerosI: 1698 case Op_CountTrailingZerosL: 1699 case Op_PopCountI: 1700 case Op_PopCountL: 1701 if 
(!UsePopCountInstruction) 1702 return false; 1703 case Op_CompareAndSwapL: 1704 case Op_CompareAndSwapP: 1705 if (!VM_Version::supports_cx8()) 1706 return false; 1707 break; 1708 } 1709 1710 return true; // Per default match rules are supported. 1711 } 1712 1713 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) { 1714 1715 // TODO 1716 // identify extra cases that we might want to provide match rules for 1717 // e.g. Op_ vector nodes and other intrinsics while guarding with vlen 1718 bool ret_value = match_rule_supported(opcode); 1719 // Add rules here. 1720 1721 return ret_value; // Per default match rules are supported. 1722 } 1723 1724 const bool Matcher::has_predicated_vectors(void) { 1725 return false; 1726 } 1727 1728 const int Matcher::float_pressure(int default_pressure_threshold) { 1729 return default_pressure_threshold; 1730 } 1731 1732 int Matcher::regnum_to_fpu_offset(int regnum) { 1733 return regnum - 32; // The FP registers are in the second chunk 1734 } 1735 1736 #ifdef ASSERT 1737 address last_rethrow = NULL; // debugging aid for Rethrow encoding 1738 #endif 1739 1740 // Vector width in bytes 1741 const int Matcher::vector_width_in_bytes(BasicType bt) { 1742 assert(MaxVectorSize == 8, ""); 1743 return 8; 1744 } 1745 1746 // Vector ideal reg 1747 const uint Matcher::vector_ideal_reg(int size) { 1748 assert(MaxVectorSize == 8, ""); 1749 return Op_RegD; 1750 } 1751 1752 const uint Matcher::vector_shift_count_ideal_reg(int size) { 1753 fatal("vector shift is not supported"); 1754 return Node::NotAMachineReg; 1755 } 1756 1757 // Limits on vector size (number of elements) loaded into vector. 1758 const int Matcher::max_vector_size(const BasicType bt) { 1759 assert(is_java_primitive(bt), "only primitive type vectors"); 1760 return vector_width_in_bytes(bt)/type2aelembytes(bt); 1761 } 1762 1763 const int Matcher::min_vector_size(const BasicType bt) { 1764 return max_vector_size(bt); // Same as max. 
}

// SPARC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// Current (2013) SPARC platforms need to read original key
// to construct decryption expanded key
const bool Matcher::pass_original_key_for_aes() {
  return true;
}

// NOTE: All currently supported SPARC HW provides fast conversion.
const bool Matcher::convL2FSupported(void) { return true; }

// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Don't need to adjust the offset.
  return UseCBCond && Assembler::is_simm12(offset);
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Depends on optimizations in MacroAssembler::setx.
  int hi = (int)(value >> 32);
  // Note: ~0 is all ones, so (value & ~0) is just value; lo is the low
  // 32 bits of the constant.
  int lo = (int)(value & ~0);
  return (hi == 0) || (hi == -1) || (lo == 0);
}

// No scaling for the parameter the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on e.g., T4 and SPARC64.
const int Matcher::float_cmove_cost() {
  return VM_Version::has_fast_cmove() ? 0 : ConditionalMoveLimit;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

bool Matcher::narrow_oop_use_complex_address() {
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

bool Matcher::const_oop_prefer_decode() {
  // TODO: Check if loading ConP from TOC in heap-based mode is better:
  // Prefer ConN+DecodeN over ConP in simple compressed oops mode.
  //  return Universe::narrow_oop_base() == NULL;
  return true;
}

bool Matcher::const_klass_prefer_decode() {
  // TODO: Check if loading ConP from TOC in heap-based mode is better:
  // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode.
  //  return Universe::narrow_klass_base() == NULL;
  return true;
}

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
const bool Matcher::int_in_long = true;

// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}

bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than a code which use multiply.
  return VM_Version::has_fast_idiv();
}

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register holding the saved SP around method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask();
}


const bool Matcher::convi2l_type_required = true;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  return clone_base_plus_offset_address(m, mstack, address_visited);
}

// No platform-specific address reshaping on SPARC.
void Compile::reshape_address(AddPNode* addp) {
}

%}


// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
#define immX      immL
#define immX13    immL13
#define immX13m7  immL13m7
#define iRegX     iRegL
#define g1RegX    g1RegL

//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams.  Encoding classes are parameterized macros used by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction.  Operands specify their base encoding interface with the
// interface keyword.  There are currently four supported interfaces:
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
// operand to generate a function which returns its register number when
// queried.  CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried.  MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried.  COND_INTER causes an operand to generate six functions which
// return the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
//
// Instructions specify two basic values for encoding.  Again, a function
// is available to check if the constant displacement is an oop.  They use the
// ins_encode keyword to specify their encoding classes (which must be
// a sequence of enc_class names, and their parameters, specified in
// the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode.  Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
1989 encode %{ 1990 enc_class enc_untested %{ 1991 #ifdef ASSERT 1992 MacroAssembler _masm(&cbuf); 1993 __ untested("encoding"); 1994 #endif 1995 %} 1996 1997 enc_class form3_mem_reg( memory mem, iRegI dst ) %{ 1998 emit_form3_mem_reg(cbuf, ra_, this, $primary, $tertiary, 1999 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2000 %} 2001 2002 enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{ 2003 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2004 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2005 %} 2006 2007 enc_class form3_mem_prefetch_read( memory mem ) %{ 2008 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2009 $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/); 2010 %} 2011 2012 enc_class form3_mem_prefetch_write( memory mem ) %{ 2013 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2014 $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/); 2015 %} 2016 2017 enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{ 2018 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2019 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2020 guarantee($mem$$index == R_G0_enc, "double index?"); 2021 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc ); 2022 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg ); 2023 emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 ); 2024 emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc ); 2025 %} 2026 2027 enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{ 2028 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2029 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2030 guarantee($mem$$index == R_G0_enc, "double index?"); 2031 // Load long with 2 instructions 2032 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 
$mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 ); 2033 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 ); 2034 %} 2035 2036 //%%% form3_mem_plus_4_reg is a hack--get rid of it 2037 enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{ 2038 guarantee($mem$$disp, "cannot offset a reg-reg operand by 4"); 2039 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg); 2040 %} 2041 2042 enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{ 2043 // Encode a reg-reg copy. If it is useless, then empty encoding. 2044 if( $rs2$$reg != $rd$$reg ) 2045 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg ); 2046 %} 2047 2048 // Target lo half of long 2049 enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{ 2050 // Encode a reg-reg copy. If it is useless, then empty encoding. 2051 if( $rs2$$reg != LONG_LO_REG($rd$$reg) ) 2052 emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg ); 2053 %} 2054 2055 // Source lo half of long 2056 enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{ 2057 // Encode a reg-reg copy. If it is useless, then empty encoding. 2058 if( LONG_LO_REG($rs2$$reg) != $rd$$reg ) 2059 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) ); 2060 %} 2061 2062 // Target hi half of long 2063 enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{ 2064 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 ); 2065 %} 2066 2067 // Source lo half of long, and leave it sign extended. 2068 enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{ 2069 // Sign extend low half 2070 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 ); 2071 %} 2072 2073 // Source hi half of long, and leave it sign extended. 
2074 enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{ 2075 // Shift high half to low half 2076 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 ); 2077 %} 2078 2079 // Source hi half of long 2080 enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{ 2081 // Encode a reg-reg copy. If it is useless, then empty encoding. 2082 if( LONG_HI_REG($rs2$$reg) != $rd$$reg ) 2083 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) ); 2084 %} 2085 2086 enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{ 2087 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg ); 2088 %} 2089 2090 enc_class enc_to_bool( iRegI src, iRegI dst ) %{ 2091 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg ); 2092 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 ); 2093 %} 2094 2095 enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{ 2096 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg ); 2097 // clear if nothing else is happening 2098 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 ); 2099 // blt,a,pn done 2100 emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 ); 2101 // mov dst,-1 in delay slot 2102 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 ); 2103 %} 2104 2105 enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{ 2106 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F ); 2107 %} 2108 2109 enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{ 2110 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 ); 2111 %} 2112 2113 enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{ 2114 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 
0x80, $rs2$$reg ); 2115 %} 2116 2117 enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{ 2118 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant ); 2119 %} 2120 2121 enc_class move_return_pc_to_o1() %{ 2122 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset ); 2123 %} 2124 2125 /* %%% merge with enc_to_bool */ 2126 enc_class enc_convP2B( iRegI dst, iRegP src ) %{ 2127 MacroAssembler _masm(&cbuf); 2128 2129 Register src_reg = reg_to_register_object($src$$reg); 2130 Register dst_reg = reg_to_register_object($dst$$reg); 2131 __ movr(Assembler::rc_nz, src_reg, 1, dst_reg); 2132 %} 2133 2134 enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{ 2135 // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))) 2136 MacroAssembler _masm(&cbuf); 2137 2138 Register p_reg = reg_to_register_object($p$$reg); 2139 Register q_reg = reg_to_register_object($q$$reg); 2140 Register y_reg = reg_to_register_object($y$$reg); 2141 Register tmp_reg = reg_to_register_object($tmp$$reg); 2142 2143 __ subcc( p_reg, q_reg, p_reg ); 2144 __ add ( p_reg, y_reg, tmp_reg ); 2145 __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg ); 2146 %} 2147 2148 enc_class form_d2i_helper(regD src, regF dst) %{ 2149 // fcmp %fcc0,$src,$src 2150 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg ); 2151 // branch %fcc0 not-nan, predict taken 2152 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2153 // fdtoi $src,$dst 2154 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg ); 2155 // fitos $dst,$dst (if nan) 2156 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg ); 2157 // clear $dst (if nan) 2158 emit3( cbuf, Assembler::arith_op , 
$dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg ); 2159 // carry on here... 2160 %} 2161 2162 enc_class form_d2l_helper(regD src, regD dst) %{ 2163 // fcmp %fcc0,$src,$src check for NAN 2164 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg ); 2165 // branch %fcc0 not-nan, predict taken 2166 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2167 // fdtox $src,$dst convert in delay slot 2168 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg ); 2169 // fxtod $dst,$dst (if nan) 2170 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg ); 2171 // clear $dst (if nan) 2172 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg ); 2173 // carry on here... 2174 %} 2175 2176 enc_class form_f2i_helper(regF src, regF dst) %{ 2177 // fcmps %fcc0,$src,$src 2178 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg ); 2179 // branch %fcc0 not-nan, predict taken 2180 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2181 // fstoi $src,$dst 2182 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg ); 2183 // fitos $dst,$dst (if nan) 2184 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg ); 2185 // clear $dst (if nan) 2186 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg ); 2187 // carry on here... 
2188 %} 2189 2190 enc_class form_f2l_helper(regF src, regD dst) %{ 2191 // fcmps %fcc0,$src,$src 2192 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg ); 2193 // branch %fcc0 not-nan, predict taken 2194 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2195 // fstox $src,$dst 2196 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg ); 2197 // fxtod $dst,$dst (if nan) 2198 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg ); 2199 // clear $dst (if nan) 2200 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg ); 2201 // carry on here... 2202 %} 2203 2204 enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2205 enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2206 enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2207 enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2208 2209 enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %} 2210 2211 enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2212 enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %} 2213 2214 enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{ 2215 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2216 %} 2217 2218 enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, 
regD rd ) %{ 2219 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2220 %} 2221 2222 enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{ 2223 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2224 %} 2225 2226 enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{ 2227 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2228 %} 2229 2230 enc_class form3_convI2F(regF rs2, regF rd) %{ 2231 emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg); 2232 %} 2233 2234 // Encloding class for traceable jumps 2235 enc_class form_jmpl(g3RegP dest) %{ 2236 emit_jmpl(cbuf, $dest$$reg); 2237 %} 2238 2239 enc_class form_jmpl_set_exception_pc(g1RegP dest) %{ 2240 emit_jmpl_set_exception_pc(cbuf, $dest$$reg); 2241 %} 2242 2243 enc_class form2_nop() %{ 2244 emit_nop(cbuf); 2245 %} 2246 2247 enc_class form2_illtrap() %{ 2248 emit_illtrap(cbuf); 2249 %} 2250 2251 2252 // Compare longs and convert into -1, 0, 1. 
2253 enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{ 2254 // CMP $src1,$src2 2255 emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg ); 2256 // blt,a,pn done 2257 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 ); 2258 // mov dst,-1 in delay slot 2259 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 ); 2260 // bgt,a,pn done 2261 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 ); 2262 // mov dst,1 in delay slot 2263 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 ); 2264 // CLR $dst 2265 emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 ); 2266 %} 2267 2268 enc_class enc_PartialSubtypeCheck() %{ 2269 MacroAssembler _masm(&cbuf); 2270 __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type); 2271 __ delayed()->nop(); 2272 %} 2273 2274 enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{ 2275 MacroAssembler _masm(&cbuf); 2276 Label* L = $labl$$label; 2277 Assembler::Predict predict_taken = 2278 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 2279 2280 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 2281 __ delayed()->nop(); 2282 %} 2283 2284 enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{ 2285 MacroAssembler _masm(&cbuf); 2286 Label* L = $labl$$label; 2287 Assembler::Predict predict_taken = 2288 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 2289 2290 __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L); 2291 __ delayed()->nop(); 2292 %} 2293 2294 enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{ 2295 int op = (Assembler::arith_op << 30) | 2296 ($dst$$reg << 25) | 2297 (Assembler::movcc_op3 << 19) | 2298 (1 << 18) | // cc2 bit for 'icc' 2299 ($cmp$$cmpcode << 14) | 2300 (0 << 13) | // select register move 2301 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' or 'xcc' 2302 ($src$$reg << 0); 2303 cbuf.insts()->emit_int32(op); 2304 %} 2305 2306 enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{ 2307 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits 2308 int op = (Assembler::arith_op << 30) | 2309 ($dst$$reg << 25) | 2310 (Assembler::movcc_op3 << 19) | 2311 (1 << 18) | // cc2 bit for 'icc' 2312 ($cmp$$cmpcode << 14) | 2313 (1 << 13) | // select immediate move 2314 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' 2315 (simm11 << 0); 2316 cbuf.insts()->emit_int32(op); 2317 %} 2318 2319 enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{ 2320 int op = (Assembler::arith_op << 30) | 2321 ($dst$$reg << 25) | 2322 (Assembler::movcc_op3 << 19) | 2323 (0 << 18) | // cc2 bit for 'fccX' 2324 ($cmp$$cmpcode << 14) | 2325 (0 << 13) | // select register move 2326 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 2327 ($src$$reg << 0); 2328 cbuf.insts()->emit_int32(op); 2329 %} 2330 2331 enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{ 2332 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits 2333 int op = (Assembler::arith_op << 30) | 2334 ($dst$$reg << 25) | 2335 (Assembler::movcc_op3 << 19) | 2336 (0 << 18) | // cc2 bit for 'fccX' 2337 ($cmp$$cmpcode << 14) | 2338 (1 << 13) | // select immediate move 2339 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 2340 (simm11 << 0); 2341 cbuf.insts()->emit_int32(op); 2342 %} 
2343 2344 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{ 2345 int op = (Assembler::arith_op << 30) | 2346 ($dst$$reg << 25) | 2347 (Assembler::fpop2_op3 << 19) | 2348 (0 << 18) | 2349 ($cmp$$cmpcode << 14) | 2350 (1 << 13) | // select register move 2351 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' or 'xcc' 2352 ($primary << 5) | // select single, double or quad 2353 ($src$$reg << 0); 2354 cbuf.insts()->emit_int32(op); 2355 %} 2356 2357 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{ 2358 int op = (Assembler::arith_op << 30) | 2359 ($dst$$reg << 25) | 2360 (Assembler::fpop2_op3 << 19) | 2361 (0 << 18) | 2362 ($cmp$$cmpcode << 14) | 2363 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX' 2364 ($primary << 5) | // select single, double or quad 2365 ($src$$reg << 0); 2366 cbuf.insts()->emit_int32(op); 2367 %} 2368 2369 // Used by the MIN/MAX encodings. Same as a CMOV, but 2370 // the condition comes from opcode-field instead of an argument. 
  // MOVcc for integer MIN/MAX: the condition code is taken from the
  // instruction's $primary opcode field rather than from an operand.
  enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($primary << 14) |
             (0 << 13) |                    // select register move
             (0 << 11) |                    // cc1, cc0 bits for 'icc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // Long (64-bit) MIN/MAX variant of the above, predicated on 'xcc'.
  enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (6 << 16) |                    // cc2 bit for 'xcc'
             ($primary << 14) |
             (0 << 13) |                    // select register move
             (0 << 11) |                    // cc1, cc0 bits for 'icc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // Load a 13-bit signed immediate into a register via OR %g0,simm13,rd.
  enc_class Set13( immI13 src, iRegI rd ) %{
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
  %}

  // Load the high 22 bits of a constant into a register via SETHI.
  enc_class SetHi22( immI src, iRegI rd ) %{
    emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
  %}

  // Materialize an arbitrary 32-bit constant using the macro-assembler's
  // 'set' (sethi/or sequence as needed).
  enc_class Set32( immI src, iRegI rd ) %{
    MacroAssembler _masm(&cbuf);
    __ set($src$$constant, reg_to_register_object($rd$$reg));
  %}

  // Optional debugging check after a call: verify that SP + framesize
  // still equals FP, trapping into the debugger if the stack is corrupt.
  enc_class call_epilog %{
    if( VerifyStackAtCalls ) {
      MacroAssembler _masm(&cbuf);
      int framesize = ra_->C->frame_size_in_bytes();
      Register temp_reg = G3;
      __ add(SP, framesize, temp_reg);
      __ cmp(temp_reg, FP);
      __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
    }
  %}

  // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
  // to G1 so the register allocator will not have to deal with the misaligned register
  // pair.
  // No-op on this configuration; see the comment above about 32-bit
  // long-return copying (nothing to do here).
  enc_class adjust_long_from_native_call %{
  %}

  enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
    // CALL directly to the runtime
    // The user of this is responsible for ensuring that R_L7 is empty (killed).
    emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec(), /*preserve_g2=*/true);
  %}

  // Save SP into L7 around a method-handle call so it can be restored after.
  enc_class preserve_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(SP, L7_mh_SP_save);
  %}

  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(L7_mh_SP_save, SP);
  %}

  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    if (!_method) {
      // Runtime stub target: no resolution stub needed.
      emit_call_reloc(cbuf, $meth$$method, runtime_call_Relocation::spec());
    } else {
      int method_index = resolved_method_index(cbuf);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      emit_call_reloc(cbuf, $meth$$method, rspec);

      // Emit stub for static call.
      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
      // Stub allocation can fail if the code cache is exhausted; bail out
      // of compilation rather than emit a call with no resolution stub.
      if (stub == NULL) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
  %}

  enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ set_inst_mark();
    int vtable_index = this->_vtable_index;
    // MachCallDynamicJavaNode::ret_addr_offset uses this same test
    if (vtable_index < 0) {
      // must be invalid_vtable_index, not nonvirtual_vtable_index
      assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
      Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
      assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
      assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
      __ ic_call((address)$meth$$method, /*emit_delay=*/true, resolved_method_index(cbuf));
    } else {
      assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
      // Just go thru the vtable
      // get receiver klass (receiver already checked for non-null)
      // If we end up going thru a c2i adapter interpreter expects method in G5
      int off = __ offset();
      __ load_klass(O0, G3_scratch);
      int klass_load_size;
      if (UseCompressedClassPointers) {
        assert(Universe::heap() != NULL, "java heap should be initialized");
        klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
      } else {
        klass_load_size = 1*BytesPerInstWord;
      }
      int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
      int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
      if (Assembler::is_simm13(v_off)) {
        __ ld_ptr(G3, v_off, G5_method);
      } else {
        // Generate 2 instructions
        __ Assembler::sethi(v_off & ~0x3ff, G5_method);
        __
        or3(G5_method, v_off & 0x3ff, G5_method);
        // ld_ptr, set_hi, set
        assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
               "Unexpected instruction size(s)");
        __ ld_ptr(G3, G5_method, G5_method);
      }
      // NOTE: for vtable dispatches, the vtable entry will never be null.
      // However it may very well end up in handle_wrong_method if the
      // method is abstract for the particular class.
      __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
      // jump to target (either compiled code or c2iadapter)
      __ jmpl(G3_scratch, G0, O7);
      __ delayed()->nop();
    }
  %}

  enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
    MacroAssembler _masm(&cbuf);

    Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    Register temp_reg = G3;   // caller must kill G3!  We cannot reuse G5_ic_reg here because
                              // we might be calling a C2I adapter which needs it.

    assert(temp_reg != G5_ic_reg, "conflicting registers");
    // Load nmethod
    __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);

    // CALL to compiled java, indirect the contents of G3
    __ set_inst_mark();
    __ callr(temp_reg, G0);
    __ delayed()->nop();
  %}

  // 32-bit signed divide: sign-extend both operands with sra, then use the
  // 64-bit sdivx (avoids the deprecated 32-bit divide and its Y register).
  enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor  = reg_to_register_object($src2$$reg);
    Register Rresult   = reg_to_register_object($dst$$reg);

    __ sra(Rdivisor, 0, Rdivisor);
    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, Rdivisor, Rresult);
  %}

  // Divide by a 13-bit immediate; same sign-extension scheme as idiv_reg.
  enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor = $imm$$constant;
    Register Rresult =
    reg_to_register_object($dst$$reg);

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rresult);
  %}

  // High 32 bits of a 32x32->64 signed multiply: sign-extend, mulx, then
  // shift the product right by 32.
  enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    Register Rdst  = reg_to_register_object($dst$$reg);

    __ sra( Rsrc1, 0, Rsrc1 );
    __ sra( Rsrc2, 0, Rsrc2 );
    __ mulx( Rsrc1, Rsrc2, Rdst );
    __ srlx( Rdst, 32, Rdst );
  %}

  // 32-bit signed remainder computed as dividend - (dividend/divisor)*divisor.
  enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor  = reg_to_register_object($src2$$reg);
    Register Rresult   = reg_to_register_object($dst$$reg);
    Register Rscratch  = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");
    assert(Rdivisor  != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sra(Rdivisor, 0, Rdivisor);
    __ sdivx(Rdividend, Rdivisor, Rscratch);
    __ mulx(Rscratch, Rdivisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
  %}

  // Remainder by a 13-bit immediate divisor; same scheme as irem_reg.
  enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor = $imm$$constant;
    Register Rresult  = reg_to_register_object($dst$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rscratch);
    __ mulx(Rscratch, divisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
  %}

  // Single-precision floating-point absolute value.
  enc_class fabss (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);

    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  // Double-precision floating-point absolute value.
  enc_class fabsd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Double-precision negate.
  enc_class fnegd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Single-precision square root.
  enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  // Double-precision square root.
  enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
  %}


  // Fused multiply-add, single precision: dst = a*b + c.
  enc_class fmadds (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
    FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);

    __ fmadd(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
  %}

  // Fused multiply-add, double precision.
  enc_class fmaddd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd =
    reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);
    FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);

    __ fmadd(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
  %}

  // Fused multiply-subtract, single precision: dst = a*b - c.
  enc_class fmsubs (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
    FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);

    __ fmsub(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
  %}

  // Fused multiply-subtract, double precision.
  enc_class fmsubd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);
    FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);

    __ fmsub(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
  %}

  // Fused negative multiply-add, single precision.
  enc_class fnmadds (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
    FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);

    __ fnmadd(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
  %}

  // Fused negative multiply-add, double precision.
  enc_class fnmaddd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);

    FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);

    __ fnmadd(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
  %}

  // Fused negative multiply-subtract, single precision.
  enc_class fnmsubs (sflt_reg dst, sflt_reg a, sflt_reg b, sflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_SingleFloatRegister_object($a$$reg);
    FloatRegister Frb = reg_to_SingleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_SingleFloatRegister_object($c$$reg);

    __ fnmsub(FloatRegisterImpl::S, Fra, Frb, Frc, Frd);
  %}

  // Fused negative multiply-subtract, double precision.
  enc_class fnmsubd (dflt_reg dst, dflt_reg a, dflt_reg b, dflt_reg c) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Frd = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fra = reg_to_DoubleFloatRegister_object($a$$reg);
    FloatRegister Frb = reg_to_DoubleFloatRegister_object($b$$reg);
    FloatRegister Frc = reg_to_DoubleFloatRegister_object($c$$reg);

    __ fnmsub(FloatRegisterImpl::D, Fra, Frb, Frc, Frd);
  %}


  // Single-precision register-to-register move.
  enc_class fmovs (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  // Double-precision register-to-register move.
  enc_class fmovd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Fast-path monitor enter; defers to the macro-assembler's
  // compiler_lock_object for the actual lock sequence.
  enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop  = reg_to_register_object($oop$$reg);
    Register Rbox  = reg_to_register_object($box$$reg);

    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark =    reg_to_register_object($scratch2$$reg);

    assert(Roop  != Rscratch, "");
    assert(Roop  != Rmark, "");
    assert(Rbox  != Rscratch, "");
    assert(Rbox  != Rmark, "");

    __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
  %}

  // Fast-path monitor exit; mirror of Fast_Lock.
  enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop  = reg_to_register_object($oop$$reg);
    Register Rbox  = reg_to_register_object($box$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark =    reg_to_register_object($scratch2$$reg);

    assert(Roop  != Rscratch, "");
    assert(Roop  != Rmark, "");
    assert(Rbox  != Rscratch, "");
    assert(Rbox  != Rmark, "");

    __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
  %}

  // Pointer compare-and-swap; leaves the condition codes set by the
  // trailing cmp so a following branch can test success.
  enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
    MacroAssembler _masm(&cbuf);
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
    __ cmp( Rold, Rnew );
  %}

  // 64-bit compare-and-swap using O7 as a temp so Rnew is preserved.
  enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ casx(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
  %}

  // raw int cas, used for compareAndSwap
  enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew =
    reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ cas(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
  %}

  // raw int cas without using tmp register for compareAndExchange
  enc_class enc_casi_exch( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ cas(Rmem, Rold, Rnew);
  %}

  // 64-bit cas without using tmp register for compareAndExchange
  enc_class enc_casx_exch( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ casx(Rmem, Rold, Rnew);
  %}

  // Materialize the 'not equal' result of a 64-bit compare ('xcc') as a
  // 0/1 boolean: start with 1, conditionally overwrite with 0 on notEqual.
  enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
  %}

  // Same as above but for the 32-bit condition codes ('icc').
  enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
  %}

  // Three-way floating-point compare producing -1/0/1 in an integer
  // register; $primary selects single vs. double precision.
  enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
                                   : reg_to_DoubleFloatRegister_object($src1$$reg);
    FloatRegister Fsrc2 = $primary ?
    reg_to_SingleFloatRegister_object($src2$$reg)
                                   : reg_to_DoubleFloatRegister_object($src2$$reg);

    // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
    __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
  %}

  // Jump to the shared rethrow stub; the in-flight exception oop is in I0.
  enc_class enc_rethrow() %{
    cbuf.set_insts_mark();
    Register temp_reg = G3;
    AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
    assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
    MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    // Debug builds record the PC of the last rethrow for post-mortem use.
    __ save_frame(0);
    AddressLiteral last_rethrow_addrlit(&last_rethrow);
    __ sethi(last_rethrow_addrlit, L1);
    Address addr(L1, last_rethrow_addrlit.low10());
    __ rdpc(L2);
    __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
    __ st_ptr(L2, addr);
    __ restore();
#endif
    __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
    __ delayed()->nop();
  %}

  // Scheduling no-ops: architecturally harmless instructions used to pad
  // particular pipeline slots.
  enc_class emit_mem_nop() %{
    // Generates the instruction LDUXA [o6,g0],#0x82,g0
    cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
  %}

  enc_class emit_fadd_nop() %{
    // Generates the instruction FMOVS f31,f31
    cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
  %}

  enc_class emit_br_nop() %{
    // Generates the instruction BPN,PN .
    cbuf.insts()->emit_int32((unsigned int) 0x00400000);
  %}

  // Memory barriers, mapped onto SPARC MEMBAR with the minimal mask each
  // semantic requires (TSO makes most orderings implicit).
  enc_class enc_membar_acquire %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
  %}

  enc_class enc_membar_release %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
  %}

  enc_class enc_membar_volatile %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
  %}

%}

//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.
//
//  S T A C K   L A Y O U T    Allocators stack-slot number
//                             |   (to get allocators register number
//  G  Owned by    |        |  v    add VMRegImpl::stack0)
//  r   CALLER     |        |
//  o     |        +--------+      pad to even-align allocators stack-slot
//  w     V        |  pad0  |        numbers; owned by CALLER
//  t -----------+--------+----> Matcher::_in_arg_limit, unaligned
//  h     ^        |   in   |  5
//        |        |  args  |  4   Holes in incoming args owned by SELF
//        |        |        |  3
//        |        +--------+
//        V        | old out|      Empty on Intel, window on Sparc
//                 | old SP |preserve|      Must be even aligned.
//           old SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
//                 |   in   |  3   area for Intel ret address
//     Owned by    |preserve|      Empty on Sparc.
//       SELF    +--------+
//        |      |  pad2  | 2     pad to align old SP
//        |      +--------+ 1
//        |      | locks  | 0
//        |      +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
//        |      |  pad1  | 11    pad to align new SP
//        |      +--------+
//        |      |        | 10
//        |      | spills |  9    spills
//        V      |        |  8    (pad0 slot for callee)
//      -----------+--------+----> Matcher::_out_arg_limit, unaligned
//        ^      |  out   |  7
//        |      |  args  |  6    Holes in outgoing args owned by CALLEE
//   Owned by    +--------+
//     CALLEE    | new out|  6    Empty on Intel, window on Sparc
//               | new SP |preserve|      Must be even-aligned.
//         new SP-+--------+----> Matcher::_new_SP, even aligned
//               |        |
//
// Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
//         known from SELF's arguments and the Java calling convention.
//         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be nessecary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
// Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
//         even aligned with pad0 as needed.
//         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
//         region 6-11 is even aligned; it may be padded out more so that
//         the region from SP to FP meets the minimum stack alignment.

frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(R_G5);                // Inline Cache Register or Method* for I2C
  interpreter_method_oop_reg(R_G5);      // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R_SP);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes);
  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
  // !LP64: Alignment size in bytes  (64-bit ->  8 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  in_preserve_stack_slots(0);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  // ADLC doesn't support parsing expressions, so I folded the math by hand.
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
  varargs_C_out_slots_killed(12);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  return_addr(REG R_I7);          // Ret Addr is in register I7

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // java
  calling_convention %{
    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);

  %}

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // C.
  c_calling_convention %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of native (C/C++) and interpreter return values.  This is specified to
  // be the same as Java.  In the 32-bit VM, long values are actually returned from
  // native calls in O0:O1 and returned to the interpreter in I0:I1.  The copying
  // to and from the register pairs is done by the appropriate call and epilog
  // opcodes.  This simplifies the register allocator.
  // Tables are indexed by ideal register type (Op_RegI..Op_RegL).
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

  // Location of compiled Java return values.
  // Same as C.
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

%}


//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);           // Required size attribute (in bits)

// avoid_back_to_back attribute is an expression that must return
// one of the following values defined in MachNode:
// AVOID_NONE   - instruction can be placed anywhere
// AVOID_BEFORE - instruction cannot be placed after an
//                instruction with MachNode::AVOID_AFTER
// AVOID_AFTER  - the next instruction cannot be the one
//                with MachNode::AVOID_BEFORE
// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
//                          the same time
ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);

ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
                                   // non-matching short branch variant of some
                                   // long branch?

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------
// Immediate Operands
// Integer Immediate: 32-bit
operand immI() %{
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 0-bit
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 5-bit
operand immI5() %{
  predicate(Assembler::is_simm5(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 8-bit
operand immI8() %{
  predicate(Assembler::is_simm8(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 10
operand immI10() %{
  predicate(n->get_int() == 10);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 11-bit
operand immI11() %{
  predicate(Assembler::is_simm11(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit
operand immI13() %{
  predicate(Assembler::is_simm13(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit minus 7
operand immI13m7() %{
  predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <=
4095));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit
operand immI16() %{
  predicate(Assembler::is_simm16(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 1-31
operand immI_1_31() %{
  predicate(n->get_int() >= 1 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 32-63
operand immI_32_63() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Integer Immediate: the value 16
operand immI_16() %{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 24
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 255
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 65535
operand immI_65535() %{
  predicate(n->get_int() == 65535);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 0-31
operand immU5() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 6-bit
operand immU6() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 63);
match(ConI); 3242 op_cost(0); 3243 format %{ %} 3244 interface(CONST_INTER); 3245 %} 3246 3247 // Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13) 3248 operand immU12() %{ 3249 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int())); 3250 match(ConI); 3251 op_cost(0); 3252 3253 format %{ %} 3254 interface(CONST_INTER); 3255 %} 3256 3257 // Unsigned Long Immediate: 12-bit (non-negative that fits in simm13) 3258 operand immUL12() %{ 3259 predicate((0 <= n->get_long()) && (n->get_long() == (int)n->get_long()) && Assembler::is_simm13((int)n->get_long())); 3260 match(ConL); 3261 op_cost(0); 3262 3263 format %{ %} 3264 interface(CONST_INTER); 3265 %} 3266 3267 // Integer Immediate non-negative 3268 operand immU31() 3269 %{ 3270 predicate(n->get_int() >= 0); 3271 match(ConI); 3272 3273 op_cost(0); 3274 format %{ %} 3275 interface(CONST_INTER); 3276 %} 3277 3278 // Long Immediate: the value FF 3279 operand immL_FF() %{ 3280 predicate( n->get_long() == 0xFFL ); 3281 match(ConL); 3282 op_cost(0); 3283 3284 format %{ %} 3285 interface(CONST_INTER); 3286 %} 3287 3288 // Long Immediate: the value FFFF 3289 operand immL_FFFF() %{ 3290 predicate( n->get_long() == 0xFFFFL ); 3291 match(ConL); 3292 op_cost(0); 3293 3294 format %{ %} 3295 interface(CONST_INTER); 3296 %} 3297 3298 // Pointer Immediate: 32 or 64-bit 3299 operand immP() %{ 3300 match(ConP); 3301 3302 op_cost(5); 3303 // formats are generated automatically for constants and base registers 3304 format %{ %} 3305 interface(CONST_INTER); 3306 %} 3307 3308 // Pointer Immediate: 64-bit 3309 operand immP_set() %{ 3310 predicate(!VM_Version::has_fast_ld()); 3311 match(ConP); 3312 3313 op_cost(5); 3314 // formats are generated automatically for constants and base registers 3315 format %{ %} 3316 interface(CONST_INTER); 3317 %} 3318 3319 // Pointer Immediate: 64-bit 3320 // From Niagara2 processors on a load should be better than materializing. 
3321 operand immP_load() %{ 3322 predicate(VM_Version::has_fast_ld() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3))); 3323 match(ConP); 3324 3325 op_cost(5); 3326 // formats are generated automatically for constants and base registers 3327 format %{ %} 3328 interface(CONST_INTER); 3329 %} 3330 3331 // Pointer Immediate: 64-bit 3332 operand immP_no_oop_cheap() %{ 3333 predicate(VM_Version::has_fast_ld() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3)); 3334 match(ConP); 3335 3336 op_cost(5); 3337 // formats are generated automatically for constants and base registers 3338 format %{ %} 3339 interface(CONST_INTER); 3340 %} 3341 3342 operand immP13() %{ 3343 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095)); 3344 match(ConP); 3345 op_cost(0); 3346 3347 format %{ %} 3348 interface(CONST_INTER); 3349 %} 3350 3351 operand immP0() %{ 3352 predicate(n->get_ptr() == 0); 3353 match(ConP); 3354 op_cost(0); 3355 3356 format %{ %} 3357 interface(CONST_INTER); 3358 %} 3359 3360 operand immP_poll() %{ 3361 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); 3362 match(ConP); 3363 3364 // formats are generated automatically for constants and base registers 3365 format %{ %} 3366 interface(CONST_INTER); 3367 %} 3368 3369 // Pointer Immediate 3370 operand immN() 3371 %{ 3372 match(ConN); 3373 3374 op_cost(10); 3375 format %{ %} 3376 interface(CONST_INTER); 3377 %} 3378 3379 operand immNKlass() 3380 %{ 3381 match(ConNKlass); 3382 3383 op_cost(10); 3384 format %{ %} 3385 interface(CONST_INTER); 3386 %} 3387 3388 // NULL Pointer Immediate 3389 operand immN0() 3390 %{ 3391 predicate(n->get_narrowcon() == 0); 3392 match(ConN); 3393 3394 op_cost(0); 3395 format %{ %} 3396 interface(CONST_INTER); 3397 %} 3398 3399 operand immL() %{ 3400 match(ConL); 3401 op_cost(40); 3402 // formats are generated automatically for constants and base registers 3403 format %{ %} 3404 
interface(CONST_INTER); 3405 %} 3406 3407 operand immL0() %{ 3408 predicate(n->get_long() == 0L); 3409 match(ConL); 3410 op_cost(0); 3411 // formats are generated automatically for constants and base registers 3412 format %{ %} 3413 interface(CONST_INTER); 3414 %} 3415 3416 // Integer Immediate: 5-bit 3417 operand immL5() %{ 3418 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long())); 3419 match(ConL); 3420 op_cost(0); 3421 format %{ %} 3422 interface(CONST_INTER); 3423 %} 3424 3425 // Long Immediate: 13-bit 3426 operand immL13() %{ 3427 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L)); 3428 match(ConL); 3429 op_cost(0); 3430 3431 format %{ %} 3432 interface(CONST_INTER); 3433 %} 3434 3435 // Long Immediate: 13-bit minus 7 3436 operand immL13m7() %{ 3437 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L)); 3438 match(ConL); 3439 op_cost(0); 3440 3441 format %{ %} 3442 interface(CONST_INTER); 3443 %} 3444 3445 // Long Immediate: low 32-bit mask 3446 operand immL_32bits() %{ 3447 predicate(n->get_long() == 0xFFFFFFFFL); 3448 match(ConL); 3449 op_cost(0); 3450 3451 format %{ %} 3452 interface(CONST_INTER); 3453 %} 3454 3455 // Long Immediate: cheap (materialize in <= 3 instructions) 3456 operand immL_cheap() %{ 3457 predicate(!VM_Version::has_fast_ld() || MacroAssembler::insts_for_set64(n->get_long()) <= 3); 3458 match(ConL); 3459 op_cost(0); 3460 3461 format %{ %} 3462 interface(CONST_INTER); 3463 %} 3464 3465 // Long Immediate: expensive (materialize in > 3 instructions) 3466 operand immL_expensive() %{ 3467 predicate(VM_Version::has_fast_ld() && MacroAssembler::insts_for_set64(n->get_long()) > 3); 3468 match(ConL); 3469 op_cost(0); 3470 3471 format %{ %} 3472 interface(CONST_INTER); 3473 %} 3474 3475 // Double Immediate 3476 operand immD() %{ 3477 match(ConD); 3478 3479 op_cost(40); 3480 format %{ %} 3481 interface(CONST_INTER); 3482 %} 3483 3484 // Double Immediate: +0.0d 3485 operand immD0() %{ 
3486 predicate(jlong_cast(n->getd()) == 0); 3487 match(ConD); 3488 3489 op_cost(0); 3490 format %{ %} 3491 interface(CONST_INTER); 3492 %} 3493 3494 // Float Immediate 3495 operand immF() %{ 3496 match(ConF); 3497 3498 op_cost(20); 3499 format %{ %} 3500 interface(CONST_INTER); 3501 %} 3502 3503 // Float Immediate: +0.0f 3504 operand immF0() %{ 3505 predicate(jint_cast(n->getf()) == 0); 3506 match(ConF); 3507 3508 op_cost(0); 3509 format %{ %} 3510 interface(CONST_INTER); 3511 %} 3512 3513 // Integer Register Operands 3514 // Integer Register 3515 operand iRegI() %{ 3516 constraint(ALLOC_IN_RC(int_reg)); 3517 match(RegI); 3518 3519 match(notemp_iRegI); 3520 match(g1RegI); 3521 match(o0RegI); 3522 match(iRegIsafe); 3523 3524 format %{ %} 3525 interface(REG_INTER); 3526 %} 3527 3528 operand notemp_iRegI() %{ 3529 constraint(ALLOC_IN_RC(notemp_int_reg)); 3530 match(RegI); 3531 3532 match(o0RegI); 3533 3534 format %{ %} 3535 interface(REG_INTER); 3536 %} 3537 3538 operand o0RegI() %{ 3539 constraint(ALLOC_IN_RC(o0_regI)); 3540 match(iRegI); 3541 3542 format %{ %} 3543 interface(REG_INTER); 3544 %} 3545 3546 // Pointer Register 3547 operand iRegP() %{ 3548 constraint(ALLOC_IN_RC(ptr_reg)); 3549 match(RegP); 3550 3551 match(lock_ptr_RegP); 3552 match(g1RegP); 3553 match(g2RegP); 3554 match(g3RegP); 3555 match(g4RegP); 3556 match(i0RegP); 3557 match(o0RegP); 3558 match(o1RegP); 3559 match(l7RegP); 3560 3561 format %{ %} 3562 interface(REG_INTER); 3563 %} 3564 3565 operand sp_ptr_RegP() %{ 3566 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3567 match(RegP); 3568 match(iRegP); 3569 3570 format %{ %} 3571 interface(REG_INTER); 3572 %} 3573 3574 operand lock_ptr_RegP() %{ 3575 constraint(ALLOC_IN_RC(lock_ptr_reg)); 3576 match(RegP); 3577 match(i0RegP); 3578 match(o0RegP); 3579 match(o1RegP); 3580 match(l7RegP); 3581 3582 format %{ %} 3583 interface(REG_INTER); 3584 %} 3585 3586 operand g1RegP() %{ 3587 constraint(ALLOC_IN_RC(g1_regP)); 3588 match(iRegP); 3589 3590 format %{ %} 3591 
interface(REG_INTER); 3592 %} 3593 3594 operand g2RegP() %{ 3595 constraint(ALLOC_IN_RC(g2_regP)); 3596 match(iRegP); 3597 3598 format %{ %} 3599 interface(REG_INTER); 3600 %} 3601 3602 operand g3RegP() %{ 3603 constraint(ALLOC_IN_RC(g3_regP)); 3604 match(iRegP); 3605 3606 format %{ %} 3607 interface(REG_INTER); 3608 %} 3609 3610 operand g1RegI() %{ 3611 constraint(ALLOC_IN_RC(g1_regI)); 3612 match(iRegI); 3613 3614 format %{ %} 3615 interface(REG_INTER); 3616 %} 3617 3618 operand g3RegI() %{ 3619 constraint(ALLOC_IN_RC(g3_regI)); 3620 match(iRegI); 3621 3622 format %{ %} 3623 interface(REG_INTER); 3624 %} 3625 3626 operand g4RegI() %{ 3627 constraint(ALLOC_IN_RC(g4_regI)); 3628 match(iRegI); 3629 3630 format %{ %} 3631 interface(REG_INTER); 3632 %} 3633 3634 operand g4RegP() %{ 3635 constraint(ALLOC_IN_RC(g4_regP)); 3636 match(iRegP); 3637 3638 format %{ %} 3639 interface(REG_INTER); 3640 %} 3641 3642 operand i0RegP() %{ 3643 constraint(ALLOC_IN_RC(i0_regP)); 3644 match(iRegP); 3645 3646 format %{ %} 3647 interface(REG_INTER); 3648 %} 3649 3650 operand o0RegP() %{ 3651 constraint(ALLOC_IN_RC(o0_regP)); 3652 match(iRegP); 3653 3654 format %{ %} 3655 interface(REG_INTER); 3656 %} 3657 3658 operand o1RegP() %{ 3659 constraint(ALLOC_IN_RC(o1_regP)); 3660 match(iRegP); 3661 3662 format %{ %} 3663 interface(REG_INTER); 3664 %} 3665 3666 operand o2RegP() %{ 3667 constraint(ALLOC_IN_RC(o2_regP)); 3668 match(iRegP); 3669 3670 format %{ %} 3671 interface(REG_INTER); 3672 %} 3673 3674 operand o7RegP() %{ 3675 constraint(ALLOC_IN_RC(o7_regP)); 3676 match(iRegP); 3677 3678 format %{ %} 3679 interface(REG_INTER); 3680 %} 3681 3682 operand l7RegP() %{ 3683 constraint(ALLOC_IN_RC(l7_regP)); 3684 match(iRegP); 3685 3686 format %{ %} 3687 interface(REG_INTER); 3688 %} 3689 3690 operand o7RegI() %{ 3691 constraint(ALLOC_IN_RC(o7_regI)); 3692 match(iRegI); 3693 3694 format %{ %} 3695 interface(REG_INTER); 3696 %} 3697 3698 operand iRegN() %{ 3699 constraint(ALLOC_IN_RC(int_reg)); 
3700 match(RegN); 3701 3702 format %{ %} 3703 interface(REG_INTER); 3704 %} 3705 3706 // Long Register 3707 operand iRegL() %{ 3708 constraint(ALLOC_IN_RC(long_reg)); 3709 match(RegL); 3710 3711 format %{ %} 3712 interface(REG_INTER); 3713 %} 3714 3715 operand o2RegL() %{ 3716 constraint(ALLOC_IN_RC(o2_regL)); 3717 match(iRegL); 3718 3719 format %{ %} 3720 interface(REG_INTER); 3721 %} 3722 3723 operand o7RegL() %{ 3724 constraint(ALLOC_IN_RC(o7_regL)); 3725 match(iRegL); 3726 3727 format %{ %} 3728 interface(REG_INTER); 3729 %} 3730 3731 operand g1RegL() %{ 3732 constraint(ALLOC_IN_RC(g1_regL)); 3733 match(iRegL); 3734 3735 format %{ %} 3736 interface(REG_INTER); 3737 %} 3738 3739 operand g3RegL() %{ 3740 constraint(ALLOC_IN_RC(g3_regL)); 3741 match(iRegL); 3742 3743 format %{ %} 3744 interface(REG_INTER); 3745 %} 3746 3747 // Int Register safe 3748 // This is 64bit safe 3749 operand iRegIsafe() %{ 3750 constraint(ALLOC_IN_RC(long_reg)); 3751 3752 match(iRegI); 3753 3754 format %{ %} 3755 interface(REG_INTER); 3756 %} 3757 3758 // Condition Code Flag Register 3759 operand flagsReg() %{ 3760 constraint(ALLOC_IN_RC(int_flags)); 3761 match(RegFlags); 3762 3763 format %{ "ccr" %} // both ICC and XCC 3764 interface(REG_INTER); 3765 %} 3766 3767 // Condition Code Register, unsigned comparisons. 3768 operand flagsRegU() %{ 3769 constraint(ALLOC_IN_RC(int_flags)); 3770 match(RegFlags); 3771 3772 format %{ "icc_U" %} 3773 interface(REG_INTER); 3774 %} 3775 3776 // Condition Code Register, pointer comparisons. 3777 operand flagsRegP() %{ 3778 constraint(ALLOC_IN_RC(int_flags)); 3779 match(RegFlags); 3780 3781 format %{ "xcc_P" %} 3782 interface(REG_INTER); 3783 %} 3784 3785 // Condition Code Register, long comparisons. 3786 operand flagsRegL() %{ 3787 constraint(ALLOC_IN_RC(int_flags)); 3788 match(RegFlags); 3789 3790 format %{ "xcc_L" %} 3791 interface(REG_INTER); 3792 %} 3793 3794 // Condition Code Register, unsigned long comparisons. 
3795 operand flagsRegUL() %{ 3796 constraint(ALLOC_IN_RC(int_flags)); 3797 match(RegFlags); 3798 3799 format %{ "xcc_UL" %} 3800 interface(REG_INTER); 3801 %} 3802 3803 // Condition Code Register, floating comparisons, unordered same as "less". 3804 operand flagsRegF() %{ 3805 constraint(ALLOC_IN_RC(float_flags)); 3806 match(RegFlags); 3807 match(flagsRegF0); 3808 3809 format %{ %} 3810 interface(REG_INTER); 3811 %} 3812 3813 operand flagsRegF0() %{ 3814 constraint(ALLOC_IN_RC(float_flag0)); 3815 match(RegFlags); 3816 3817 format %{ %} 3818 interface(REG_INTER); 3819 %} 3820 3821 3822 // Condition Code Flag Register used by long compare 3823 operand flagsReg_long_LTGE() %{ 3824 constraint(ALLOC_IN_RC(int_flags)); 3825 match(RegFlags); 3826 format %{ "icc_LTGE" %} 3827 interface(REG_INTER); 3828 %} 3829 operand flagsReg_long_EQNE() %{ 3830 constraint(ALLOC_IN_RC(int_flags)); 3831 match(RegFlags); 3832 format %{ "icc_EQNE" %} 3833 interface(REG_INTER); 3834 %} 3835 operand flagsReg_long_LEGT() %{ 3836 constraint(ALLOC_IN_RC(int_flags)); 3837 match(RegFlags); 3838 format %{ "icc_LEGT" %} 3839 interface(REG_INTER); 3840 %} 3841 3842 3843 operand regD() %{ 3844 constraint(ALLOC_IN_RC(dflt_reg)); 3845 match(RegD); 3846 3847 match(regD_low); 3848 3849 format %{ %} 3850 interface(REG_INTER); 3851 %} 3852 3853 operand regF() %{ 3854 constraint(ALLOC_IN_RC(sflt_reg)); 3855 match(RegF); 3856 3857 format %{ %} 3858 interface(REG_INTER); 3859 %} 3860 3861 operand regD_low() %{ 3862 constraint(ALLOC_IN_RC(dflt_low_reg)); 3863 match(regD); 3864 3865 format %{ %} 3866 interface(REG_INTER); 3867 %} 3868 3869 // Special Registers 3870 3871 // Method Register 3872 operand inline_cache_regP(iRegP reg) %{ 3873 constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1 3874 match(reg); 3875 format %{ %} 3876 interface(REG_INTER); 3877 %} 3878 3879 operand interpreter_method_oop_regP(iRegP reg) %{ 3880 constraint(ALLOC_IN_RC(g5_regP)); // 
G5=interpreter_method_oop_reg but uses 2 bits instead of 1 3881 match(reg); 3882 format %{ %} 3883 interface(REG_INTER); 3884 %} 3885 3886 3887 //----------Complex Operands--------------------------------------------------- 3888 // Indirect Memory Reference 3889 operand indirect(sp_ptr_RegP reg) %{ 3890 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3891 match(reg); 3892 3893 op_cost(100); 3894 format %{ "[$reg]" %} 3895 interface(MEMORY_INTER) %{ 3896 base($reg); 3897 index(0x0); 3898 scale(0x0); 3899 disp(0x0); 3900 %} 3901 %} 3902 3903 // Indirect with simm13 Offset 3904 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{ 3905 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3906 match(AddP reg offset); 3907 3908 op_cost(100); 3909 format %{ "[$reg + $offset]" %} 3910 interface(MEMORY_INTER) %{ 3911 base($reg); 3912 index(0x0); 3913 scale(0x0); 3914 disp($offset); 3915 %} 3916 %} 3917 3918 // Indirect with simm13 Offset minus 7 3919 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{ 3920 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3921 match(AddP reg offset); 3922 3923 op_cost(100); 3924 format %{ "[$reg + $offset]" %} 3925 interface(MEMORY_INTER) %{ 3926 base($reg); 3927 index(0x0); 3928 scale(0x0); 3929 disp($offset); 3930 %} 3931 %} 3932 3933 // Note: Intel has a swapped version also, like this: 3934 //operand indOffsetX(iRegI reg, immP offset) %{ 3935 // constraint(ALLOC_IN_RC(int_reg)); 3936 // match(AddP offset reg); 3937 // 3938 // op_cost(100); 3939 // format %{ "[$reg + $offset]" %} 3940 // interface(MEMORY_INTER) %{ 3941 // base($reg); 3942 // index(0x0); 3943 // scale(0x0); 3944 // disp($offset); 3945 // %} 3946 //%} 3947 //// However, it doesn't make sense for SPARC, since 3948 // we have no particularly good way to embed oops in 3949 // single instructions. 
3950 3951 // Indirect with Register Index 3952 operand indIndex(iRegP addr, iRegX index) %{ 3953 constraint(ALLOC_IN_RC(ptr_reg)); 3954 match(AddP addr index); 3955 3956 op_cost(100); 3957 format %{ "[$addr + $index]" %} 3958 interface(MEMORY_INTER) %{ 3959 base($addr); 3960 index($index); 3961 scale(0x0); 3962 disp(0x0); 3963 %} 3964 %} 3965 3966 //----------Special Memory Operands-------------------------------------------- 3967 // Stack Slot Operand - This operand is used for loading and storing temporary 3968 // values on the stack where a match requires a value to 3969 // flow through memory. 3970 operand stackSlotI(sRegI reg) %{ 3971 constraint(ALLOC_IN_RC(stack_slots)); 3972 op_cost(100); 3973 //match(RegI); 3974 format %{ "[$reg]" %} 3975 interface(MEMORY_INTER) %{ 3976 base(0xE); // R_SP 3977 index(0x0); 3978 scale(0x0); 3979 disp($reg); // Stack Offset 3980 %} 3981 %} 3982 3983 operand stackSlotP(sRegP reg) %{ 3984 constraint(ALLOC_IN_RC(stack_slots)); 3985 op_cost(100); 3986 //match(RegP); 3987 format %{ "[$reg]" %} 3988 interface(MEMORY_INTER) %{ 3989 base(0xE); // R_SP 3990 index(0x0); 3991 scale(0x0); 3992 disp($reg); // Stack Offset 3993 %} 3994 %} 3995 3996 operand stackSlotF(sRegF reg) %{ 3997 constraint(ALLOC_IN_RC(stack_slots)); 3998 op_cost(100); 3999 //match(RegF); 4000 format %{ "[$reg]" %} 4001 interface(MEMORY_INTER) %{ 4002 base(0xE); // R_SP 4003 index(0x0); 4004 scale(0x0); 4005 disp($reg); // Stack Offset 4006 %} 4007 %} 4008 operand stackSlotD(sRegD reg) %{ 4009 constraint(ALLOC_IN_RC(stack_slots)); 4010 op_cost(100); 4011 //match(RegD); 4012 format %{ "[$reg]" %} 4013 interface(MEMORY_INTER) %{ 4014 base(0xE); // R_SP 4015 index(0x0); 4016 scale(0x0); 4017 disp($reg); // Stack Offset 4018 %} 4019 %} 4020 operand stackSlotL(sRegL reg) %{ 4021 constraint(ALLOC_IN_RC(stack_slots)); 4022 op_cost(100); 4023 //match(RegL); 4024 format %{ "[$reg]" %} 4025 interface(MEMORY_INTER) %{ 4026 base(0xE); // R_SP 4027 index(0x0); 4028 scale(0x0); 
4029 disp($reg); // Stack Offset 4030 %} 4031 %} 4032 4033 // Operands for expressing Control Flow 4034 // NOTE: Label is a predefined operand which should not be redefined in 4035 // the AD file. It is generically handled within the ADLC. 4036 4037 //----------Conditional Branch Operands---------------------------------------- 4038 // Comparison Op - This is the operation of the comparison, and is limited to 4039 // the following set of codes: 4040 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) 4041 // 4042 // Other attributes of the comparison, such as unsignedness, are specified 4043 // by the comparison instruction that sets a condition code flags register. 4044 // That result is represented by a flags operand whose subtype is appropriate 4045 // to the unsignedness (etc.) of the comparison. 4046 // 4047 // Later, the instruction which matches both the Comparison Op (a Bool) and 4048 // the flags (produced by the Cmp) specifies the coding of the comparison op 4049 // by matching a specific subtype of Bool operand below, such as cmpOpU. 
// Comparison Op, signed integer comparisons.
// The hex values are the condition-field encodings placed into the SPARC
// branch instructions (e.g. 0x1 = equal, 0x9 = not-equal for Bicc/BPcc).
// Unlike the variants below, this operand accepts overflow/no_overflow tests.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x3);
    greater_equal(0xB);
    less_equal(0x2);
    greater(0xA);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, unsigned
// Same as cmpOp but with the unsigned (carry-based) condition encodings for
// the ordering tests.  Overflow tests are excluded by the predicate.
operand cmpOpU() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, pointer (same as unsigned)
// Pointers compare as unsigned values; encodings match cmpOpU.
operand cmpOpP() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, branch-register encoding
// Encodings for the branch-on-register-condition (BPr-style) instructions,
// which test a register against zero directly rather than reading the
// condition codes.  Overflow tests are not expressible in this form.
operand cmpOp_reg() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x1);
    not_equal    (0x5);
    less         (0x3);
    greater_equal(0x7);
    less_equal   (0x2);
    greater      (0x6);
    overflow(0x7); // not supported
    no_overflow(0xF); // not supported
  %}
%}

// Comparison Code, floating, unordered same as less
// Encodings for the floating-point branch (FBfcc-style) forms; an unordered
// comparison result is folded into the "less" direction.
operand cmpOpF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "fl" %}
  interface(COND_INTER) %{
    equal(0x9);
    not_equal(0x1);
    less(0x3);
    greater_equal(0xB);
    less_equal(0xE);
    greater(0x6);

    overflow(0x7); // not supported
    no_overflow(0xF); // not supported
  %}
%}

// Used by long compare
// Same tests as cmpOp but with less/greater (and their *_equal forms)
// swapped — used when the comparison operands have been commuted.
operand cmpOp_commute() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0xA);
    greater_equal(0x2);
    less_equal(0xB);
    greater(0x3);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format. The classic
// case of this is memory operands.
// Any memory-addressing form an instruction may accept: plain register,
// register + simm13 displacement, or register + register index.
opclass memory( indirect, indOffset13, indIndex );
// Restricted class for instructions that only take the reg+reg form.
opclass indIndexMemory( indIndex );

//----------PIPELINE-----------------------------------------------------------
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;           // Fixed size instructions
  branch_has_delay_slot;             // Branch has delay slot following
  max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// (IALU is a pseudo-resource satisfied by either ALU pipe, A0 or A1).
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline
pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.
// In the pipe_class bodies below, "operand : STAGE(access)" records the
// pipeline stage (from pipe_desc) at which each operand is read or written,
// and "RESOURCE : STAGE" marks a functional unit busy from that stage.

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  single_instruction;
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU reg-reg long operation
// Two issue slots: long arithmetic takes a pair of ALU operations.
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
  instruction_count(2);
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
  IALU : R;
%}

// Integer ALU reg-reg long dependent operation
// The second ALU op depends on the first, so the IALU resource is held for
// two cycles and the pair may not fit in one bundle.
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst  : E(write);
  src1 : R(read);
  src2 : R(read);
  cr   : E(write);
  IALU : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
  single_instruction;
  dst  : E(write);
  src1 : R(read);
  IALU : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
  single_instruction;
  dst  : E(write);
  cr   : E(write);
  src1 : R(read);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU reg-imm operation with condition code
pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
  single_instruction;
  dst  : E(write);
  cr   : E(write);
  src1 : R(read);
  IALU : R;
%}

// Integer ALU zero-reg operation
// First source is the constant zero, so only src2 is read.
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
  single_instruction;
  dst  : E(write);
  src2 : R(read);
  IALU : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
  single_instruction;
  cr   : E(write);
  src  : R(read);
  IALU : R;
%}

// Integer ALU reg-reg operation with condition code only
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{ 4275 single_instruction; 4276 cr : E(write); 4277 src1 : R(read); 4278 src2 : R(read); 4279 IALU : R; 4280 %} 4281 4282 // Integer ALU reg-imm operation with condition code only 4283 pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{ 4284 single_instruction; 4285 cr : E(write); 4286 src1 : R(read); 4287 IALU : R; 4288 %} 4289 4290 // Integer ALU reg-reg-zero operation with condition code only 4291 pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{ 4292 single_instruction; 4293 cr : E(write); 4294 src1 : R(read); 4295 src2 : R(read); 4296 IALU : R; 4297 %} 4298 4299 // Integer ALU reg-imm-zero operation with condition code only 4300 pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{ 4301 single_instruction; 4302 cr : E(write); 4303 src1 : R(read); 4304 IALU : R; 4305 %} 4306 4307 // Integer ALU reg-reg operation with condition code, src1 modified 4308 pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{ 4309 single_instruction; 4310 cr : E(write); 4311 src1 : E(write); 4312 src1 : R(read); 4313 src2 : R(read); 4314 IALU : R; 4315 %} 4316 4317 // Integer ALU reg-imm operation with condition code, src1 modified 4318 pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{ 4319 single_instruction; 4320 cr : E(write); 4321 src1 : E(write); 4322 src1 : R(read); 4323 IALU : R; 4324 %} 4325 4326 pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{ 4327 multiple_bundles; 4328 dst : E(write)+4; 4329 cr : E(write); 4330 src1 : R(read); 4331 src2 : R(read); 4332 IALU : R(3); 4333 BR : R(2); 4334 %} 4335 4336 // Integer ALU operation 4337 pipe_class ialu_none(iRegI dst) %{ 4338 single_instruction; 4339 dst : E(write); 4340 IALU : R; 4341 %} 4342 4343 // Integer ALU reg operation 4344 pipe_class ialu_reg(iRegI dst, iRegI src) %{ 4345 single_instruction; 
may_have_no_code; 4346 dst : E(write); 4347 src : R(read); 4348 IALU : R; 4349 %} 4350 4351 // Integer ALU reg conditional operation 4352 // This instruction has a 1 cycle stall, and cannot execute 4353 // in the same cycle as the instruction setting the condition 4354 // code. We kludge this by pretending to read the condition code 4355 // 1 cycle earlier, and by marking the functional units as busy 4356 // for 2 cycles with the result available 1 cycle later than 4357 // is really the case. 4358 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{ 4359 single_instruction; 4360 op2_out : C(write); 4361 op1 : R(read); 4362 cr : R(read); // This is really E, with a 1 cycle stall 4363 BR : R(2); 4364 MS : R(2); 4365 %} 4366 4367 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{ 4368 instruction_count(1); multiple_bundles; 4369 dst : C(write)+1; 4370 src : R(read)+1; 4371 IALU : R(1); 4372 BR : E(2); 4373 MS : E(2); 4374 %} 4375 4376 // Integer ALU reg operation 4377 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{ 4378 single_instruction; may_have_no_code; 4379 dst : E(write); 4380 src : R(read); 4381 IALU : R; 4382 %} 4383 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{ 4384 single_instruction; may_have_no_code; 4385 dst : E(write); 4386 src : R(read); 4387 IALU : R; 4388 %} 4389 4390 // Two integer ALU reg operations 4391 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{ 4392 instruction_count(2); 4393 dst : E(write); 4394 src : R(read); 4395 A0 : R; 4396 A1 : R; 4397 %} 4398 4399 // Two integer ALU reg operations 4400 pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{ 4401 instruction_count(2); may_have_no_code; 4402 dst : E(write); 4403 src : R(read); 4404 A0 : R; 4405 A1 : R; 4406 %} 4407 4408 // Integer ALU imm operation 4409 pipe_class ialu_imm(iRegI dst, immI13 src) %{ 4410 single_instruction; 4411 dst : E(write); 4412 IALU : R; 4413 %} 4414 4415 // Integer ALU reg-reg with carry operation 4416 
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{ 4417 single_instruction; 4418 dst : E(write); 4419 src1 : R(read); 4420 src2 : R(read); 4421 IALU : R; 4422 %} 4423 4424 // Integer ALU cc operation 4425 pipe_class ialu_cc(iRegI dst, flagsReg cc) %{ 4426 single_instruction; 4427 dst : E(write); 4428 cc : R(read); 4429 IALU : R; 4430 %} 4431 4432 // Integer ALU cc / second IALU operation 4433 pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{ 4434 instruction_count(1); multiple_bundles; 4435 dst : E(write)+1; 4436 src : R(read); 4437 IALU : R; 4438 %} 4439 4440 // Integer ALU cc / second IALU operation 4441 pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{ 4442 instruction_count(1); multiple_bundles; 4443 dst : E(write)+1; 4444 p : R(read); 4445 q : R(read); 4446 IALU : R; 4447 %} 4448 4449 // Integer ALU hi-lo-reg operation 4450 pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{ 4451 instruction_count(1); multiple_bundles; 4452 dst : E(write)+1; 4453 IALU : R(2); 4454 %} 4455 4456 // Float ALU hi-lo-reg operation (with temp) 4457 pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{ 4458 instruction_count(1); multiple_bundles; 4459 dst : E(write)+1; 4460 IALU : R(2); 4461 %} 4462 4463 // Long Constant 4464 pipe_class loadConL( iRegL dst, immL src ) %{ 4465 instruction_count(2); multiple_bundles; 4466 dst : E(write)+1; 4467 IALU : R(2); 4468 IALU : R(2); 4469 %} 4470 4471 // Pointer Constant 4472 pipe_class loadConP( iRegP dst, immP src ) %{ 4473 instruction_count(0); multiple_bundles; 4474 fixed_latency(6); 4475 %} 4476 4477 // Polling Address 4478 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{ 4479 instruction_count(0); multiple_bundles; 4480 fixed_latency(6); 4481 %} 4482 4483 // Long Constant small 4484 pipe_class loadConLlo( iRegL dst, immL src ) %{ 4485 instruction_count(2); 4486 dst : E(write); 4487 IALU : R; 4488 IALU : R; 4489 %} 4490 4491 // [PHH] This is wrong for 64-bit. See LdImmF/D. 
// Float/double immediate load via g3 temp (see the [PHH] note above).
pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
    instruction_count(1); multiple_bundles;
    src  : R(read);
    dst  : M(write)+1;
    IALU : R;
    MS   : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
    single_instruction;
    IALU : R;
%}

// Integer ALU nop operation (pinned to unit A0)
pipe_class ialu_nop_A0() %{
    single_instruction;
    A0   : R;
%}

// Integer ALU nop operation (pinned to unit A1)
pipe_class ialu_nop_A1() %{
    single_instruction;
    A1   : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst  : E(write);
    src1 : R(read);
    src2 : R(read);
    MS   : R(5);
%}

// Integer Multiply reg-imm operation
pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst  : E(write);
    src1 : R(read);
    MS   : R(5);
%}

pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    single_instruction;
    dst  : E(write)+4;
    src1 : R(read);
    src2 : R(read);
    MS   : R(6);
%}

pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    single_instruction;
    dst  : E(write)+4;
    src1 : R(read);
    MS   : R(6);
%}

// Integer Divide reg-reg
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write);
    temp : E(write);
    src1 : R(read);
    src2 : R(read);
    temp : R(read);
    MS   : R(38);
%}

// Integer Divide reg-imm
pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write);
    temp : E(write);
    src1 : R(read);
    temp : R(read);
    MS   : R(38);
%}

// Long Divide
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    src2 : R(read)+1;
    MS   : R(70);
%}

pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    MS   : R(70);
%}

// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    cr   : R(read);
    FA   : R(2);
    BR   : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    cr   : R(read);
    FA   : R(2);
    BR   : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
%}

// Floating Point Divide Float
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
    FDIV : C(14);
%}

// Floating Point Divide Double
pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
    FDIV : C(17);
%}

// Fused floating-point multiply-add float.
pipe_class fmaF_regx4(regF dst, regF src1, regF src2, regF src3) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    src3 : E(read);
    FM   : R;
%}

// Fused floating-point multiply-add double.
pipe_class fmaD_regx4(regD dst, regD src1, regD src2, regD src3) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    src3 : E(read);
    FM   : R;
%}

// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
    single_instruction;
    dst  : W(write);
    src  : E(read);
    FA   : R(1);
%}

// Floating Point Move/Negate/Abs Double
pipe_class faddD_reg(regD dst, regD src) %{
    single_instruction;
    dst  : W(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert F->D
pipe_class fcvtF2D(regD dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert I->D
// NOTE(review): src is a regF — the int bits are presumably staged through an
// FP register (fitod-style); confirm against the matching instructs.
pipe_class fcvtI2D(regD dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert LHi->D
pipe_class fcvtLHi2D(regD dst, regD src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert L->D
pipe_class fcvtL2D(regD dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert L->F
pipe_class fcvtL2F(regD dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert D->F
pipe_class fcvtD2F(regD dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert I->L
pipe_class fcvtI2L(regD dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert D->I
pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : X(write)+6;
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert D->L
pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : X(write)+6;
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert F->I
pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : X(write)+6;
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert F->L
pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : X(write)+6;
    src  : E(read);
    FA   : R;
%}

// Floating Point Convert I->F
pipe_class fcvtI2F(regF dst, regF src) %{
    single_instruction;
    dst  : X(write);
    src  : E(read);
    FA   : R;
%}

// Floating Point Compare (float)
pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
    single_instruction;
    cr   : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Point Compare (double)
pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
    single_instruction;
    cr   : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Add Nop
pipe_class fadd_nop() %{
    single_instruction;
    FA   : R;
%}

// Integer Store to Memory
pipe_class istore_mem_reg(memory mem, iRegI src) %{
    single_instruction;
    mem  : R(read);
    src  : C(read);
    MS   : R;
%}

// Integer Store to Memory (stack-pointer or register base)
pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
    single_instruction;
    mem  : R(read);
    src  : C(read);
    MS   : R;
%}

// Integer Store Zero to Memory
pipe_class istore_mem_zero(memory mem, immI0 src) %{
    single_instruction;
    mem  : R(read);
    MS   : R;
%}

// Special Stack Slot Store
pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Special Stack Slot Store (long, two memory ops)
pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
    instruction_count(2); multiple_bundles;
    stkSlot : R(read);
    src     : C(read);
    MS      : R(2);
%}

// Float Store
pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
    single_instruction;
    mem  : R(read);
    src  : C(read);
    MS   : R;
%}

// Float Store Zero
pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
    single_instruction;
    mem  : R(read);
    MS   : R;
%}

// Double Store
pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
    instruction_count(1);
    mem  : R(read);
    src  : C(read);
    MS   : R;
%}

// Double Store Zero
pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
    single_instruction;
    mem  : R(read);
    MS   : R;
%}

// Special Stack Slot Float Store
pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Special Stack Slot Double Store
pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Integer Load (when sign bit propagation not needed)
pipe_class iload_mem(iRegI dst, memory mem) %{
    single_instruction;
    mem  : R(read);
    dst  : C(write);
    MS   : R;
%}

// Integer Load from stack operand
pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
    single_instruction;
    mem  : R(read);
    dst  : C(write);
    MS   : R;
%}

// Integer Load (when sign bit propagation or masking is needed)
pipe_class iload_mask_mem(iRegI dst, memory mem) %{
    single_instruction;
    mem  : R(read);
    dst  : M(write);
    MS   : R;
%}

// Float Load
pipe_class floadF_mem(regF dst, memory mem) %{
    single_instruction;
    mem  : R(read);
    dst  : M(write);
    MS   : R;
%}

// Double Load
pipe_class floadD_mem(regD dst, memory mem) %{
    instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
    mem  : R(read);
    dst  : M(write);
    MS   : R;
%}

// Float Load from stack slot
pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
    single_instruction;
    stkSlot : R(read);
    dst     : M(write);
    MS      : R;
%}

// Double Load from stack slot
pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
    single_instruction;
    stkSlot : R(read);
    dst     : M(write);
    MS      : R;
%}

// Memory Nop
pipe_class mem_nop() %{
    single_instruction;
    MS   : R;
%}

pipe_class sethi(iRegP dst, immI src) %{
    single_instruction;
    dst  : E(write);
    IALU : R;
%}

pipe_class loadPollP(iRegP poll) %{
    single_instruction;
    poll : R(read);
    MS   : R;
%}

pipe_class br(Universe br, label labl) %{
    single_instruction_with_delay_slot;
    BR   : R;
%}

pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr   : E(read);
    BR   : R;
%}

pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
    single_instruction_with_delay_slot;
    op1  : E(read);
    BR   : R;
    MS   : R;
%}

// Compare and branch
pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
    instruction_count(2); has_delay_slot;
    cr   : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
    BR   : R;
%}

// Compare and branch
pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
    instruction_count(2); has_delay_slot;
    cr   : E(write);
    src1 : R(read);
    IALU : R;
    BR   : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
    single_instruction;
    src1 : E(read);
    src2 : E(read);
    IALU : R;
    BR   : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
    single_instruction;
    src1 : E(read);
    IALU : R;
    BR   : R;
%}

pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr   : E(read);
    BR   : R;
%}

pipe_class br_nop() %{
    single_instruction;
    BR   : R;
%}

pipe_class simple_call(method meth) %{
    instruction_count(2); multiple_bundles; force_serialization;
    fixed_latency(100);
    BR   : R(1);
    MS   : R(1);
    A0   : R(1);
%}

pipe_class compiled_call(method meth) %{
    instruction_count(1); multiple_bundles; force_serialization;
    fixed_latency(100);
    MS   : R(1);
%}

pipe_class call(method meth) %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(100);
%}

pipe_class tail_call(Universe ignore, label labl) %{
    single_instruction; has_delay_slot;
    fixed_latency(100);
    BR   : R(1);
    MS   : R(1);
%}

pipe_class ret(Universe ignore) %{
    single_instruction; has_delay_slot;
    BR   :
R(1);
    MS   : R(1);
%}

pipe_class ret_poll(g3RegP poll) %{
    instruction_count(3); has_delay_slot;
    poll : E(read);
    MS   : R;
%}

// The real do-nothing guy
pipe_class empty( ) %{
    instruction_count(0);
%}

pipe_class long_memory_op() %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(25);
    MS   : R(1);
%}

// Check-cast
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
    array : R(read);
    match : R(read);
    IALU  : R(2);
    BR    : R(2);
    MS    : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
    src1 : E(read);
    src2 : E(read);
    dst  : E(write);
    FA   : R;
    MS   : R(2);
    BR   : R(2);
%}

// Compare for p < q, and conditionally add y
// NOTE(review): no trailing ';' after R(3) below — appears tolerated by adlc,
// but inconsistent with the other pipe classes; confirm before changing.
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
    p    : E(read);
    q    : E(read);
    y    : E(read);
    IALU : R(3)
%}

// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
    src2   : E(read);
    srcdst : E(read);
    IALU   : R;
    BR     : R;
%}

// Define the class for the Nop node
define %{
    MachNop = ialu_nop;
%}

// Closes the enclosing pipeline block (opened above, outside this view).
%}

//----------INSTRUCTIONS-------------------------------------------------------

//------------Special Stack Slot instructions - no match rules-----------------
instruct stkI_to_regF(regF dst, stackSlotI src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDF    $src,$dst\t! stkI to regF" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadF_stk);
%}

instruct stkL_to_regD(regD dst, stackSlotL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDDF   $src,$dst\t! stkL to regD" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadD_stk);
%}

instruct regF_to_stkI(stackSlotI dst, regF src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STF    $src,$dst\t! regF to stkI" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

instruct regD_to_stkL(stackSlotL dst, regD src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STDF   $src,$dst\t! regD to stkL" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreD_stk_reg);
%}

instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST*2);
  format %{ "STW    $src,$dst.hi\t! long\n\t"
            "STW    R_G0,$dst.lo" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
  ins_pipe(lstoreI_stk_reg);
%}

instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STX    $src,$dst\t! regL to stkD" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_stk_reg);
%}

//---------- Chain stack slots between similar types --------

// Load integer from stack slot
instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LDUW   $src,$dst\t!stk" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store integer to stack slot
instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STW    $src,$dst\t!stk" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load long from stack slot
instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  format %{ "LDX    $src,$dst\t! long" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  format %{ "STX    $src,$dst\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDX    $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STX    $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP    ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_cost(0);

  format %{ "NOP    ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_cost(0);

  format %{ "NOP    ! Memory Pipeline" %}
  ins_encode( emit_mem_nop );
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_cost(0);

  format %{ "NOP    ! Floating Add Pipeline" %}
  ins_encode( emit_fadd_nop );
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_cost(0);

  format %{ "NOP    ! Branch Pipeline" %}
  ins_encode( emit_br_nop );
  ins_pipe(br_nop);
%}

//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem,$dst\t! byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
instruct loadB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem,$dst\t! byte -> long" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg
instruct loadUB(iRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB   $mem,$dst\t! ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
instruct loadUB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB   $mem,$dst\t! ubyte -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with 32-bit mask into Long Register
instruct loadUB2L_immI(iRegL dst, memory mem, immI mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUB   $mem,$dst\t# ubyte & 32-bit mask -> long\n\t"
            "AND    $dst,right_n_bits($mask, 8),$dst" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
    __ and3($dst$$Register, $mask$$constant & right_n_bits(8), $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Short (16bit signed)
instruct loadS(iRegI dst, memory mem) %{
  match(Set dst (LoadS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH   $mem,$dst\t! short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB   $mem+1,$dst\t! short -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16bit signed) into a Long Register
instruct loadS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH   $mem,$dst\t! short -> long" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned)
instruct loadUS(iRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH   $mem,$dst\t! ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem+1,$dst\t! ushort -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
instruct loadUS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH   $mem,$dst\t! ushort/char -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB   $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 1);  // LSB is index+1 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUH   $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
            "AND    $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduh($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 32-bit mask into a Long Register
instruct loadUS2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  format %{ "LDUH   $mem,$dst\t! ushort/char & 32-bit mask -> long\n\t"
            "SET    right_n_bits($mask, 16),$tmp\n\t"
            "AND    $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduh($mem$$Address, Rdst);
    __ set($mask$$constant & right_n_bits(16), Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer
instruct loadI(iRegI dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW   $mem,$dst\t! int" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer to Byte (8 bit signed)
instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB   $mem+3,$dst\t! int -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Byte (8 bit UNsigned)
instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDUB   $mem+3,$dst\t! int -> ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Short (16 bit signed)
instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSH   $mem+2,$dst\t! int -> short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Short (16 bit UNsigned)
instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDUH   $mem+2,$dst\t! int -> ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer into a Long Register
instruct loadI2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSW   $mem,$dst\t! int -> long" %}
  ins_encode %{
    __ ldsw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer with mask 0xFF into a Long Register
instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB   $mem+3,$dst\t! int & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3);  // LSB is index+3 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with mask 0xFFFF into a Long Register
instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH   $mem+2,$dst\t! int & 0xFFFF -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2);  // LSW is index+2 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 12-bit mask into a Long Register
instruct loadI2L_immU12(iRegL dst, memory mem, immU12 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUW   $mem,$dst\t! int & 12-bit mask -> long\n\t"
            "AND    $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduw($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 31-bit mask into a Long Register
instruct loadI2L_immU31(iRegL dst, memory mem, immU31 mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  format %{ "LDUW   $mem,$dst\t! int & 31-bit mask -> long\n\t"
            "SET    $mask,$tmp\n\t"
            "AND    $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduw($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Integer into a Long Register
instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW   $mem,$dst\t! uint -> long" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned
instruct loadL(iRegL dst, memory mem ) %{
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDX    $mem,$dst\t! long" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - UNaligned
instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
  match(Set dst (LoadL_unaligned mem));
  effect(KILL tmp);
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  format %{ "LDUW   $mem+4,R_O7\t! misaligned long\n"
          "\tLDUW   $mem ,$dst\n"
          "\tSLLX   #32, $dst, $dst\n"
          "\tOR     $dst, R_O7, $dst" %}
  opcode(Assembler::lduw_op3);
  ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Range
instruct loadRange(iRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LDUW   $mem,$dst\t! range" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(iload_mem);
%}

// Load Integer into %f register (for fitos/fitod)
instruct loadI_freg(regF dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LDF    $mem,$dst\t! for fitos/fitod" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}

// Load Pointer
instruct loadP(iRegP dst, memory mem) %{
  match(Set dst (LoadP mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDX    $mem,$dst\t! ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegN dst, memory mem) %{
  match(Set dst (LoadN mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW   $mem,$dst\t! compressed ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDX    $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load narrow Klass Pointer (definition continues beyond this chunk)
instruct loadNKlass(iRegN dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW $mem,$dst\t! 
compressed klass ptr" %} 5756 ins_encode %{ 5757 __ lduw($mem$$Address, $dst$$Register); 5758 %} 5759 ins_pipe(iload_mem); 5760 %} 5761 5762 // Load Double 5763 instruct loadD(regD dst, memory mem) %{ 5764 match(Set dst (LoadD mem)); 5765 ins_cost(MEMORY_REF_COST); 5766 5767 format %{ "LDDF $mem,$dst" %} 5768 opcode(Assembler::lddf_op3); 5769 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5770 ins_pipe(floadD_mem); 5771 %} 5772 5773 // Load Double - UNaligned 5774 instruct loadD_unaligned(regD_low dst, memory mem ) %{ 5775 match(Set dst (LoadD_unaligned mem)); 5776 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 5777 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n" 5778 "\tLDF $mem+4,$dst.lo\t!" %} 5779 opcode(Assembler::ldf_op3); 5780 ins_encode( form3_mem_reg_double_unaligned( mem, dst )); 5781 ins_pipe(iload_mem); 5782 %} 5783 5784 // Load Float 5785 instruct loadF(regF dst, memory mem) %{ 5786 match(Set dst (LoadF mem)); 5787 ins_cost(MEMORY_REF_COST); 5788 5789 format %{ "LDF $mem,$dst" %} 5790 opcode(Assembler::ldf_op3); 5791 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5792 ins_pipe(floadF_mem); 5793 %} 5794 5795 // Load Constant 5796 instruct loadConI( iRegI dst, immI src ) %{ 5797 match(Set dst src); 5798 ins_cost(DEFAULT_COST * 3/2); 5799 format %{ "SET $src,$dst" %} 5800 ins_encode( Set32(src, dst) ); 5801 ins_pipe(ialu_hi_lo_reg); 5802 %} 5803 5804 instruct loadConI13( iRegI dst, immI13 src ) %{ 5805 match(Set dst src); 5806 5807 size(4); 5808 format %{ "MOV $src,$dst" %} 5809 ins_encode( Set13( src, dst ) ); 5810 ins_pipe(ialu_imm); 5811 %} 5812 5813 instruct loadConP_set(iRegP dst, immP_set con) %{ 5814 match(Set dst con); 5815 ins_cost(DEFAULT_COST * 3/2); 5816 format %{ "SET $con,$dst\t! 
ptr" %} 5817 ins_encode %{ 5818 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc(); 5819 intptr_t val = $con$$constant; 5820 if (constant_reloc == relocInfo::oop_type) { 5821 __ set_oop_constant((jobject) val, $dst$$Register); 5822 } else if (constant_reloc == relocInfo::metadata_type) { 5823 __ set_metadata_constant((Metadata*)val, $dst$$Register); 5824 } else { // non-oop pointers, e.g. card mark base, heap top 5825 assert(constant_reloc == relocInfo::none, "unexpected reloc type"); 5826 __ set(val, $dst$$Register); 5827 } 5828 %} 5829 ins_pipe(loadConP); 5830 %} 5831 5832 instruct loadConP_load(iRegP dst, immP_load con) %{ 5833 match(Set dst con); 5834 ins_cost(MEMORY_REF_COST); 5835 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %} 5836 ins_encode %{ 5837 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 5838 __ ld_ptr($constanttablebase, con_offset, $dst$$Register); 5839 %} 5840 ins_pipe(loadConP); 5841 %} 5842 5843 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{ 5844 match(Set dst con); 5845 ins_cost(DEFAULT_COST * 3/2); 5846 format %{ "SET $con,$dst\t! 
non-oop ptr" %} 5847 ins_encode %{ 5848 if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) { 5849 __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register); 5850 } else { 5851 __ set($con$$constant, $dst$$Register); 5852 } 5853 %} 5854 ins_pipe(loadConP); 5855 %} 5856 5857 instruct loadConP0(iRegP dst, immP0 src) %{ 5858 match(Set dst src); 5859 5860 size(4); 5861 format %{ "CLR $dst\t!ptr" %} 5862 ins_encode %{ 5863 __ clr($dst$$Register); 5864 %} 5865 ins_pipe(ialu_imm); 5866 %} 5867 5868 instruct loadConP_poll(iRegP dst, immP_poll src) %{ 5869 match(Set dst src); 5870 ins_cost(DEFAULT_COST); 5871 format %{ "SET $src,$dst\t!ptr" %} 5872 ins_encode %{ 5873 AddressLiteral polling_page(os::get_polling_page()); 5874 __ sethi(polling_page, reg_to_register_object($dst$$reg)); 5875 %} 5876 ins_pipe(loadConP_poll); 5877 %} 5878 5879 instruct loadConN0(iRegN dst, immN0 src) %{ 5880 match(Set dst src); 5881 5882 size(4); 5883 format %{ "CLR $dst\t! compressed NULL ptr" %} 5884 ins_encode %{ 5885 __ clr($dst$$Register); 5886 %} 5887 ins_pipe(ialu_imm); 5888 %} 5889 5890 instruct loadConN(iRegN dst, immN src) %{ 5891 match(Set dst src); 5892 ins_cost(DEFAULT_COST * 3/2); 5893 format %{ "SET $src,$dst\t! compressed ptr" %} 5894 ins_encode %{ 5895 Register dst = $dst$$Register; 5896 __ set_narrow_oop((jobject)$src$$constant, dst); 5897 %} 5898 ins_pipe(ialu_hi_lo_reg); 5899 %} 5900 5901 instruct loadConNKlass(iRegN dst, immNKlass src) %{ 5902 match(Set dst src); 5903 ins_cost(DEFAULT_COST * 3/2); 5904 format %{ "SET $src,$dst\t! compressed klass ptr" %} 5905 ins_encode %{ 5906 Register dst = $dst$$Register; 5907 __ set_narrow_klass((Klass*)$src$$constant, dst); 5908 %} 5909 ins_pipe(ialu_hi_lo_reg); 5910 %} 5911 5912 // Materialize long value (predicated by immL_cheap). 
instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  ins_cost(DEFAULT_COST * 3);
  format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
  ins_encode %{
    __ set64($con$$constant, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(loadConL);
%}

// Load long value from constant table (predicated by immL_expensive).
instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ldx($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConL);
%}

// Load long zero: single CLR (encoded via Set13 with the zero immediate).
instruct loadConL0( iRegL dst, immL0 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "CLR $dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

// Load long constant that fits in a 13-bit signed immediate: single MOV.
instruct loadConL13( iRegL dst, immL13 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);

  size(4);
  format %{ "MOV $src,$dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

// Load float constant from the constant table; tmp (O7) carries the
// table offset when it does not fit in a simm13.
instruct loadConF(regF dst, immF con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
  %}
  ins_pipe(loadConFD);
%}

// Load double constant from the constant table.
instruct loadConD(regD dst, immD con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Prefetch instructions for allocation.
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchAlloc( memory mem ) %{
  predicate(AllocatePrefetchInstr == 0);
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Use BIS instruction to prefetch for allocation.
// Could fault, need space at the end of TLAB.
instruct prefetchAlloc_bis( iRegP dst ) %{
  predicate(AllocatePrefetchInstr == 1);
  match( PrefetchAllocation dst );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
  ins_encode %{
    __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  %}
  ins_pipe(istore_mem_reg);
%}

// Next code is used for finding next cache line address to prefetch.
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}

//----------Store Instructions-------------------------------------------------
// Store Byte
instruct storeB(memory mem, iRegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Byte zero: immI0 operand, encoded with R_G0 as the source register.
instruct storeB0(memory mem, immI0 src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Card-mark store: StoreCM with zero value, encoded with R_G0 as source.
instruct storeCM0(memory mem, immI0 src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Char/Short
instruct storeC(memory mem, iRegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Char/Short zero, encoded with R_G0 as the source register.
instruct storeC0(memory mem, immI0 src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer
instruct storeI(memory mem, iRegI src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Long
instruct storeL(memory mem, iRegL src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  format %{ "STX $src,$mem\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Integer zero, encoded with R_G0 as the source register.
instruct storeI0(memory mem, immI0 src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Long zero, encoded with R_G0 as the source register.
instruct storeL0(memory mem, immL0 src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer from float register (used after fstoi)
instruct storeI_Freg(memory mem, regF src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store Pointer
instruct storeP(memory dst, sp_ptr_RegP src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
  ins_encode( form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_spORreg);
%}

// Store null Pointer, encoded with R_G0 as the source register.
instruct storeP0(memory dst, immP0 src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
  ins_encode( form3_mem_reg( dst, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Compressed Pointer
instruct storeN(memory dst, iRegN src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    // Register+register form when an index register is present,
    // register+displacement form otherwise.
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

// Store Compressed Klass Pointer (same encoding shape as storeN).
instruct storeNKlass(memory dst, iRegN src) %{
  match(Set dst (StoreNKlass dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

// Store compressed null pointer (immediate zero).
instruct storeN0(memory dst, immN0 src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    if (index != G0) {
      __ stw(0, base, index);
    } else {
      __ stw(0, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_zero);
%}

// Store Double
instruct storeD( memory mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STDF $src,$mem" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreD_mem_reg);
%}

// Store Double zero: done with an integer STX of R_G0.
instruct storeD0( memory mem, immD0 src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreD_mem_zero);
%}

// Store Float
instruct storeF( memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STF $src,$mem" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store Float zero: done with an integer STW of R_G0.
instruct storeF0( memory mem, immF0 src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src,$mem\t! storeF0" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreF_mem_zero);
%}

// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
  ins_pipe(ialu_reg);
%}

// Encode variant when the oop is statically known non-null.
instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop (may be null).
instruct decodeHeapOop(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Decompress a narrow oop statically known non-null (or constant).
instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ encode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ decode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

instruct membar_acquire() %{
  match(MemBarAcquire);
  match(LoadFence);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-acquire" %}
  ins_encode( enc_membar_acquire );
  ins_pipe(long_memory_op);
%}

instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_release() %{
  match(MemBarRelease);
  match(StoreFence);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-release" %}
  ins_encode( enc_membar_release );
  ins_pipe(long_memory_op);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-volatile" %}
  ins_encode( enc_membar_volatile );
  ins_pipe(long_memory_op);
%}

// MemBarVolatile that a prior CAS/atomic already covers: emit nothing.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-storestore (empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

//----------Register Move Instructions-----------------------------------------
instruct roundDouble_nop(regD dst) %{
  match(Set dst (RoundDouble dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}

instruct roundFloat_nop(regF dst) %{
  match(Set dst (RoundFloat dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}


// Cast Index to Pointer for unsafe natives
instruct castX2P(iRegX src, iRegP dst) %{
  match(Set dst (CastX2P src));

  format %{ "MOV $src,$dst\t! IntX->Ptr" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

// Cast Pointer to Index for unsafe natives
instruct castP2X(iRegP src, iRegX dst) %{
  match(Set dst (CastP2X src));

  format %{ "MOV $src,$dst\t! Ptr->IntX" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

// Spill a double register to a stack slot.
instruct stfSSD(stackSlotD stkSlot, regD src) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
  match(Set stkSlot src); // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "STDF $src,$stkSlot\t!stk" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, src));
  ins_pipe(fstoreD_stk_reg);
%}

// Reload a double register from a stack slot.
instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
  match(Set dst stkSlot); // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "LDDF $stkSlot,$dst\t!stk" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, dst));
  ins_pipe(floadD_stk);
%}

// Spill a single-precision float register to a stack slot.
instruct stfSSF(stackSlotF stkSlot, regF src) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
  match(Set stkSlot src); // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "STF $src,$stkSlot\t!stk" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, src));
  ins_pipe(fstoreF_stk_reg);
%}

//----------Conditional Move---------------------------------------------------
// Conditional move
instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

// Unsigned-compare variant (cmpOpU/flagsRegU).
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

// Conditional move on float condition codes.
instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}

instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_imm);
%}

// Conditional move for RegN. Only cmov(reg,reg).
instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

// This instruction also works with CmpN so we don't need cmovNN_reg.
instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

// This instruction also works with CmpN so we don't need cmovNN_reg.
6543 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{ 6544 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6545 ins_cost(150); 6546 size(4); 6547 format %{ "MOV$cmp $icc,$src,$dst" %} 6548 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6549 ins_pipe(ialu_reg); 6550 %} 6551 6552 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{ 6553 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src))); 6554 ins_cost(150); 6555 size(4); 6556 format %{ "MOV$cmp $fcc,$src,$dst" %} 6557 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6558 ins_pipe(ialu_reg); 6559 %} 6560 6561 // Conditional move 6562 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{ 6563 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6564 ins_cost(150); 6565 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6566 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6567 ins_pipe(ialu_reg); 6568 %} 6569 6570 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{ 6571 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6572 ins_cost(140); 6573 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6574 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6575 ins_pipe(ialu_imm); 6576 %} 6577 6578 // This instruction also works with CmpN so we don't need cmovPN_reg. 6579 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{ 6580 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6581 ins_cost(150); 6582 6583 size(4); 6584 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6585 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6586 ins_pipe(ialu_reg); 6587 %} 6588 6589 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{ 6590 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6591 ins_cost(150); 6592 6593 size(4); 6594 format %{ "MOV$cmp $icc,$src,$dst\t! 
ptr" %} 6595 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6596 ins_pipe(ialu_reg); 6597 %} 6598 6599 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{ 6600 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6601 ins_cost(140); 6602 6603 size(4); 6604 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6605 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6606 ins_pipe(ialu_imm); 6607 %} 6608 6609 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{ 6610 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6611 ins_cost(140); 6612 6613 size(4); 6614 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6615 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6616 ins_pipe(ialu_imm); 6617 %} 6618 6619 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{ 6620 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 6621 ins_cost(150); 6622 size(4); 6623 format %{ "MOV$cmp $fcc,$src,$dst" %} 6624 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6625 ins_pipe(ialu_imm); 6626 %} 6627 6628 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{ 6629 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 6630 ins_cost(140); 6631 size(4); 6632 format %{ "MOV$cmp $fcc,$src,$dst" %} 6633 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 6634 ins_pipe(ialu_imm); 6635 %} 6636 6637 // Conditional move 6638 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{ 6639 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src))); 6640 ins_cost(150); 6641 opcode(0x101); 6642 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 6643 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6644 ins_pipe(int_conditional_float_move); 6645 %} 6646 6647 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{ 6648 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 6649 ins_cost(150); 6650 6651 size(4); 6652 format %{ "FMOVS$cmp $icc,$src,$dst" %} 6653 
opcode(0x101); 6654 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 6655 ins_pipe(int_conditional_float_move); 6656 %} 6657 6658 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{ 6659 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 6660 ins_cost(150); 6661 6662 size(4); 6663 format %{ "FMOVS$cmp $icc,$src,$dst" %} 6664 opcode(0x101); 6665 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 6666 ins_pipe(int_conditional_float_move); 6667 %} 6668 6669 // Conditional move, 6670 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{ 6671 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src))); 6672 ins_cost(150); 6673 size(4); 6674 format %{ "FMOVF$cmp $fcc,$src,$dst" %} 6675 opcode(0x1); 6676 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 6677 ins_pipe(int_conditional_double_move); 6678 %} 6679 6680 // Conditional move 6681 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{ 6682 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src))); 6683 ins_cost(150); 6684 size(4); 6685 opcode(0x102); 6686 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 6687 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6688 ins_pipe(int_conditional_double_move); 6689 %} 6690 6691 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{ 6692 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 6693 ins_cost(150); 6694 6695 size(4); 6696 format %{ "FMOVD$cmp $icc,$src,$dst" %} 6697 opcode(0x102); 6698 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 6699 ins_pipe(int_conditional_double_move); 6700 %} 6701 6702 instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{ 6703 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 6704 ins_cost(150); 6705 6706 size(4); 6707 format %{ "FMOVD$cmp $icc,$src,$dst" %} 6708 opcode(0x102); 6709 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 6710 ins_pipe(int_conditional_double_move); 6711 %} 6712 6713 // 
Conditional move, 6714 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{ 6715 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src))); 6716 ins_cost(150); 6717 size(4); 6718 format %{ "FMOVD$cmp $fcc,$src,$dst" %} 6719 opcode(0x2); 6720 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 6721 ins_pipe(int_conditional_double_move); 6722 %} 6723 6724 // Conditional move 6725 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{ 6726 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 6727 ins_cost(150); 6728 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 6729 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6730 ins_pipe(ialu_reg); 6731 %} 6732 6733 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{ 6734 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 6735 ins_cost(140); 6736 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 6737 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6738 ins_pipe(ialu_imm); 6739 %} 6740 6741 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{ 6742 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 6743 ins_cost(150); 6744 6745 size(4); 6746 format %{ "MOV$cmp $icc,$src,$dst\t! long" %} 6747 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6748 ins_pipe(ialu_reg); 6749 %} 6750 6751 6752 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{ 6753 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 6754 ins_cost(150); 6755 6756 size(4); 6757 format %{ "MOV$cmp $icc,$src,$dst\t! long" %} 6758 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6759 ins_pipe(ialu_reg); 6760 %} 6761 6762 6763 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{ 6764 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src))); 6765 ins_cost(150); 6766 6767 size(4); 6768 format %{ "MOV$cmp $fcc,$src,$dst\t! 
long" %} 6769 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6770 ins_pipe(ialu_reg); 6771 %} 6772 6773 6774 6775 //----------OS and Locking Instructions---------------------------------------- 6776 6777 // This name is KNOWN by the ADLC and cannot be changed. 6778 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type 6779 // for this guy. 6780 instruct tlsLoadP(g2RegP dst) %{ 6781 match(Set dst (ThreadLocal)); 6782 6783 size(0); 6784 ins_cost(0); 6785 format %{ "# TLS is in G2" %} 6786 ins_encode( /*empty encoding*/ ); 6787 ins_pipe(ialu_none); 6788 %} 6789 6790 instruct checkCastPP( iRegP dst ) %{ 6791 match(Set dst (CheckCastPP dst)); 6792 6793 size(0); 6794 format %{ "# checkcastPP of $dst" %} 6795 ins_encode( /*empty encoding*/ ); 6796 ins_pipe(empty); 6797 %} 6798 6799 6800 instruct castPP( iRegP dst ) %{ 6801 match(Set dst (CastPP dst)); 6802 format %{ "# castPP of $dst" %} 6803 ins_encode( /*empty encoding*/ ); 6804 ins_pipe(empty); 6805 %} 6806 6807 instruct castII( iRegI dst ) %{ 6808 match(Set dst (CastII dst)); 6809 format %{ "# castII of $dst" %} 6810 ins_encode( /*empty encoding*/ ); 6811 ins_cost(0); 6812 ins_pipe(empty); 6813 %} 6814 6815 //----------Arithmetic Instructions-------------------------------------------- 6816 // Addition Instructions 6817 // Register Addition 6818 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 6819 match(Set dst (AddI src1 src2)); 6820 6821 size(4); 6822 format %{ "ADD $src1,$src2,$dst" %} 6823 ins_encode %{ 6824 __ add($src1$$Register, $src2$$Register, $dst$$Register); 6825 %} 6826 ins_pipe(ialu_reg_reg); 6827 %} 6828 6829 // Immediate Addition 6830 instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 6831 match(Set dst (AddI src1 src2)); 6832 6833 size(4); 6834 format %{ "ADD $src1,$src2,$dst" %} 6835 opcode(Assembler::add_op3, Assembler::arith_op); 6836 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 6837 ins_pipe(ialu_reg_imm); 6838 %} 6839 6840 // Pointer Register Addition 6841 
instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{ 6842 match(Set dst (AddP src1 src2)); 6843 6844 size(4); 6845 format %{ "ADD $src1,$src2,$dst" %} 6846 opcode(Assembler::add_op3, Assembler::arith_op); 6847 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 6848 ins_pipe(ialu_reg_reg); 6849 %} 6850 6851 // Pointer Immediate Addition 6852 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{ 6853 match(Set dst (AddP src1 src2)); 6854 6855 size(4); 6856 format %{ "ADD $src1,$src2,$dst" %} 6857 opcode(Assembler::add_op3, Assembler::arith_op); 6858 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 6859 ins_pipe(ialu_reg_imm); 6860 %} 6861 6862 // Long Addition 6863 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 6864 match(Set dst (AddL src1 src2)); 6865 6866 size(4); 6867 format %{ "ADD $src1,$src2,$dst\t! long" %} 6868 opcode(Assembler::add_op3, Assembler::arith_op); 6869 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 6870 ins_pipe(ialu_reg_reg); 6871 %} 6872 6873 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 6874 match(Set dst (AddL src1 con)); 6875 6876 size(4); 6877 format %{ "ADD $src1,$con,$dst" %} 6878 opcode(Assembler::add_op3, Assembler::arith_op); 6879 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 6880 ins_pipe(ialu_reg_imm); 6881 %} 6882 6883 //----------Conditional_store-------------------------------------------------- 6884 // Conditional-store of the updated heap-top. 6885 // Used during allocation of the shared heap. 6886 // Sets flags (EQ) on success. Implemented with a CASA on Sparc. 6887 6888 // LoadP-locked. Same as a regular pointer load when used with a compare-swap 6889 instruct loadPLocked(iRegP dst, memory mem) %{ 6890 match(Set dst (LoadPLocked mem)); 6891 ins_cost(MEMORY_REF_COST); 6892 6893 format %{ "LDX $mem,$dst\t! 
ptr" %} 6894 opcode(Assembler::ldx_op3, 0, REGP_OP); 6895 ins_encode( form3_mem_reg( mem, dst ) ); 6896 ins_pipe(iload_mem); 6897 %} 6898 6899 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{ 6900 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval))); 6901 effect( KILL newval ); 6902 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t" 6903 "CMP R_G3,$oldval\t\t! See if we made progress" %} 6904 ins_encode( enc_cas(heap_top_ptr,oldval,newval) ); 6905 ins_pipe( long_memory_op ); 6906 %} 6907 6908 // Conditional-store of an int value. 6909 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{ 6910 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval))); 6911 effect( KILL newval ); 6912 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 6913 "CMP $oldval,$newval\t\t! See if we made progress" %} 6914 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 6915 ins_pipe( long_memory_op ); 6916 %} 6917 6918 // Conditional-store of a long value. 6919 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{ 6920 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval))); 6921 effect( KILL newval ); 6922 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 6923 "CMP $oldval,$newval\t\t! 
See if we made progress" %} 6924 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 6925 ins_pipe( long_memory_op ); 6926 %} 6927 6928 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them 6929 6930 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 6931 predicate(VM_Version::supports_cx8()); 6932 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval))); 6933 match(Set res (WeakCompareAndSwapL mem_ptr (Binary oldval newval))); 6934 effect( USE mem_ptr, KILL ccr, KILL tmp1); 6935 format %{ 6936 "MOV $newval,O7\n\t" 6937 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 6938 "CMP $oldval,O7\t\t! See if we made progress\n\t" 6939 "MOV 1,$res\n\t" 6940 "MOVne xcc,R_G0,$res" 6941 %} 6942 ins_encode( enc_casx(mem_ptr, oldval, newval), 6943 enc_lflags_ne_to_boolean(res) ); 6944 ins_pipe( long_memory_op ); 6945 %} 6946 6947 6948 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 6949 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval))); 6950 match(Set res (WeakCompareAndSwapI mem_ptr (Binary oldval newval))); 6951 effect( USE mem_ptr, KILL ccr, KILL tmp1); 6952 format %{ 6953 "MOV $newval,O7\n\t" 6954 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 6955 "CMP $oldval,O7\t\t! 
See if we made progress\n\t" 6956 "MOV 1,$res\n\t" 6957 "MOVne icc,R_G0,$res" 6958 %} 6959 ins_encode( enc_casi(mem_ptr, oldval, newval), 6960 enc_iflags_ne_to_boolean(res) ); 6961 ins_pipe( long_memory_op ); 6962 %} 6963 6964 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 6965 predicate(VM_Version::supports_cx8()); 6966 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); 6967 match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval))); 6968 effect( USE mem_ptr, KILL ccr, KILL tmp1); 6969 format %{ 6970 "MOV $newval,O7\n\t" 6971 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 6972 "CMP $oldval,O7\t\t! See if we made progress\n\t" 6973 "MOV 1,$res\n\t" 6974 "MOVne xcc,R_G0,$res" 6975 %} 6976 ins_encode( enc_casx(mem_ptr, oldval, newval), 6977 enc_lflags_ne_to_boolean(res) ); 6978 ins_pipe( long_memory_op ); 6979 %} 6980 6981 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 6982 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); 6983 match(Set res (WeakCompareAndSwapN mem_ptr (Binary oldval newval))); 6984 effect( USE mem_ptr, KILL ccr, KILL tmp1); 6985 format %{ 6986 "MOV $newval,O7\n\t" 6987 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 6988 "CMP $oldval,O7\t\t! See if we made progress\n\t" 6989 "MOV 1,$res\n\t" 6990 "MOVne icc,R_G0,$res" 6991 %} 6992 ins_encode( enc_casi(mem_ptr, oldval, newval), 6993 enc_iflags_ne_to_boolean(res) ); 6994 ins_pipe( long_memory_op ); 6995 %} 6996 6997 instruct compareAndExchangeI(iRegP mem_ptr, iRegI oldval, iRegI newval) 6998 %{ 6999 match(Set newval (CompareAndExchangeI mem_ptr (Binary oldval newval))); 7000 effect( USE mem_ptr ); 7001 7002 format %{ 7003 "CASA [$mem_ptr],$oldval,$newval\t! 
If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t" 7004 %} 7005 ins_encode( enc_casi_exch(mem_ptr, oldval, newval) ); 7006 ins_pipe( long_memory_op ); 7007 %} 7008 7009 instruct compareAndExchangeL(iRegP mem_ptr, iRegL oldval, iRegL newval) 7010 %{ 7011 match(Set newval (CompareAndExchangeL mem_ptr (Binary oldval newval))); 7012 effect( USE mem_ptr ); 7013 7014 format %{ 7015 "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t" 7016 %} 7017 ins_encode( enc_casx_exch(mem_ptr, oldval, newval) ); 7018 ins_pipe( long_memory_op ); 7019 %} 7020 7021 instruct compareAndExchangeP(iRegP mem_ptr, iRegP oldval, iRegP newval) 7022 %{ 7023 match(Set newval (CompareAndExchangeP mem_ptr (Binary oldval newval))); 7024 effect( USE mem_ptr ); 7025 7026 format %{ 7027 "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t" 7028 %} 7029 ins_encode( enc_casx_exch(mem_ptr, oldval, newval) ); 7030 ins_pipe( long_memory_op ); 7031 %} 7032 7033 instruct compareAndExchangeN(iRegP mem_ptr, iRegN oldval, iRegN newval) 7034 %{ 7035 match(Set newval (CompareAndExchangeN mem_ptr (Binary oldval newval))); 7036 effect( USE mem_ptr ); 7037 7038 format %{ 7039 "CASA [$mem_ptr],$oldval,$newval\t! 
If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr] and set $newval=[$mem_ptr]\n\t" 7040 %} 7041 ins_encode( enc_casi_exch(mem_ptr, oldval, newval) ); 7042 ins_pipe( long_memory_op ); 7043 %} 7044 7045 instruct xchgI( memory mem, iRegI newval) %{ 7046 match(Set newval (GetAndSetI mem newval)); 7047 format %{ "SWAP [$mem],$newval" %} 7048 size(4); 7049 ins_encode %{ 7050 __ swap($mem$$Address, $newval$$Register); 7051 %} 7052 ins_pipe( long_memory_op ); 7053 %} 7054 7055 7056 instruct xchgN( memory mem, iRegN newval) %{ 7057 match(Set newval (GetAndSetN mem newval)); 7058 format %{ "SWAP [$mem],$newval" %} 7059 size(4); 7060 ins_encode %{ 7061 __ swap($mem$$Address, $newval$$Register); 7062 %} 7063 ins_pipe( long_memory_op ); 7064 %} 7065 7066 //--------------------- 7067 // Subtraction Instructions 7068 // Register Subtraction 7069 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7070 match(Set dst (SubI src1 src2)); 7071 7072 size(4); 7073 format %{ "SUB $src1,$src2,$dst" %} 7074 opcode(Assembler::sub_op3, Assembler::arith_op); 7075 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7076 ins_pipe(ialu_reg_reg); 7077 %} 7078 7079 // Immediate Subtraction 7080 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7081 match(Set dst (SubI src1 src2)); 7082 7083 size(4); 7084 format %{ "SUB $src1,$src2,$dst" %} 7085 opcode(Assembler::sub_op3, Assembler::arith_op); 7086 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7087 ins_pipe(ialu_reg_imm); 7088 %} 7089 7090 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 7091 match(Set dst (SubI zero src2)); 7092 7093 size(4); 7094 format %{ "NEG $src2,$dst" %} 7095 opcode(Assembler::sub_op3, Assembler::arith_op); 7096 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7097 ins_pipe(ialu_zero_reg); 7098 %} 7099 7100 // Long subtraction 7101 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7102 match(Set dst (SubL src1 src2)); 7103 7104 size(4); 7105 format %{ "SUB 
$src1,$src2,$dst\t! long" %} 7106 opcode(Assembler::sub_op3, Assembler::arith_op); 7107 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7108 ins_pipe(ialu_reg_reg); 7109 %} 7110 7111 // Immediate Subtraction 7112 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7113 match(Set dst (SubL src1 con)); 7114 7115 size(4); 7116 format %{ "SUB $src1,$con,$dst\t! long" %} 7117 opcode(Assembler::sub_op3, Assembler::arith_op); 7118 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7119 ins_pipe(ialu_reg_imm); 7120 %} 7121 7122 // Long negation 7123 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{ 7124 match(Set dst (SubL zero src2)); 7125 7126 size(4); 7127 format %{ "NEG $src2,$dst\t! long" %} 7128 opcode(Assembler::sub_op3, Assembler::arith_op); 7129 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7130 ins_pipe(ialu_zero_reg); 7131 %} 7132 7133 // Multiplication Instructions 7134 // Integer Multiplication 7135 // Register Multiplication 7136 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7137 match(Set dst (MulI src1 src2)); 7138 7139 size(4); 7140 format %{ "MULX $src1,$src2,$dst" %} 7141 opcode(Assembler::mulx_op3, Assembler::arith_op); 7142 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7143 ins_pipe(imul_reg_reg); 7144 %} 7145 7146 // Immediate Multiplication 7147 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7148 match(Set dst (MulI src1 src2)); 7149 7150 size(4); 7151 format %{ "MULX $src1,$src2,$dst" %} 7152 opcode(Assembler::mulx_op3, Assembler::arith_op); 7153 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7154 ins_pipe(imul_reg_imm); 7155 %} 7156 7157 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7158 match(Set dst (MulL src1 src2)); 7159 ins_cost(DEFAULT_COST * 5); 7160 size(4); 7161 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7162 opcode(Assembler::mulx_op3, Assembler::arith_op); 7163 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7164 ins_pipe(mulL_reg_reg); 7165 %} 7166 7167 // Immediate Multiplication 7168 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7169 match(Set dst (MulL src1 src2)); 7170 ins_cost(DEFAULT_COST * 5); 7171 size(4); 7172 format %{ "MULX $src1,$src2,$dst" %} 7173 opcode(Assembler::mulx_op3, Assembler::arith_op); 7174 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7175 ins_pipe(mulL_reg_imm); 7176 %} 7177 7178 // Integer Division 7179 // Register Division 7180 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{ 7181 match(Set dst (DivI src1 src2)); 7182 ins_cost((2+71)*DEFAULT_COST); 7183 7184 format %{ "SRA $src2,0,$src2\n\t" 7185 "SRA $src1,0,$src1\n\t" 7186 "SDIVX $src1,$src2,$dst" %} 7187 ins_encode( idiv_reg( src1, src2, dst ) ); 7188 ins_pipe(sdiv_reg_reg); 7189 %} 7190 7191 // Immediate Division 7192 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{ 7193 match(Set dst (DivI src1 src2)); 7194 ins_cost((2+71)*DEFAULT_COST); 7195 7196 format %{ "SRA $src1,0,$src1\n\t" 7197 "SDIVX $src1,$src2,$dst" %} 7198 ins_encode( idiv_imm( src1, src2, dst ) ); 7199 ins_pipe(sdiv_reg_imm); 7200 %} 7201 7202 //----------Div-By-10-Expansion------------------------------------------------ 7203 // Extract hi bits of a 32x32->64 bit multiply. 7204 // Expand rule only, not matched 7205 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{ 7206 effect( DEF dst, USE src1, USE src2 ); 7207 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t" 7208 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %} 7209 ins_encode( enc_mul_hi(dst,src1,src2)); 7210 ins_pipe(sdiv_reg_reg); 7211 %} 7212 7213 // Magic constant, reciprocal of 10 7214 instruct loadConI_x66666667(iRegIsafe dst) %{ 7215 effect( DEF dst ); 7216 7217 size(8); 7218 format %{ "SET 0x66666667,$dst\t! 
Used in div-by-10" %} 7219 ins_encode( Set32(0x66666667, dst) ); 7220 ins_pipe(ialu_hi_lo_reg); 7221 %} 7222 7223 // Register Shift Right Arithmetic Long by 32-63 7224 instruct sra_31( iRegI dst, iRegI src ) %{ 7225 effect( DEF dst, USE src ); 7226 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %} 7227 ins_encode( form3_rs1_rd_copysign_hi(src,dst) ); 7228 ins_pipe(ialu_reg_reg); 7229 %} 7230 7231 // Arithmetic Shift Right by 8-bit immediate 7232 instruct sra_reg_2( iRegI dst, iRegI src ) %{ 7233 effect( DEF dst, USE src ); 7234 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %} 7235 opcode(Assembler::sra_op3, Assembler::arith_op); 7236 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) ); 7237 ins_pipe(ialu_reg_imm); 7238 %} 7239 7240 // Integer DIV with 10 7241 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{ 7242 match(Set dst (DivI src div)); 7243 ins_cost((6+6)*DEFAULT_COST); 7244 expand %{ 7245 iRegIsafe tmp1; // Killed temps; 7246 iRegIsafe tmp2; // Killed temps; 7247 iRegI tmp3; // Killed temps; 7248 iRegI tmp4; // Killed temps; 7249 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1 7250 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2 7251 sra_31( tmp3, src ); // SRA src,31 -> tmp3 7252 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4 7253 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst 7254 %} 7255 %} 7256 7257 // Register Long Division 7258 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7259 match(Set dst (DivL src1 src2)); 7260 ins_cost(DEFAULT_COST*71); 7261 size(4); 7262 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7263 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7264 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7265 ins_pipe(divL_reg_reg); 7266 %} 7267 7268 // Register Long Division 7269 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7270 match(Set dst (DivL src1 src2)); 7271 ins_cost(DEFAULT_COST*71); 7272 size(4); 7273 format %{ "SDIVX $src1,$src2,$dst\t! 
long" %} 7274 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7275 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7276 ins_pipe(divL_reg_imm); 7277 %} 7278 7279 // Integer Remainder 7280 // Register Remainder 7281 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{ 7282 match(Set dst (ModI src1 src2)); 7283 effect( KILL ccr, KILL temp); 7284 7285 format %{ "SREM $src1,$src2,$dst" %} 7286 ins_encode( irem_reg(src1, src2, dst, temp) ); 7287 ins_pipe(sdiv_reg_reg); 7288 %} 7289 7290 // Immediate Remainder 7291 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{ 7292 match(Set dst (ModI src1 src2)); 7293 effect( KILL ccr, KILL temp); 7294 7295 format %{ "SREM $src1,$src2,$dst" %} 7296 ins_encode( irem_imm(src1, src2, dst, temp) ); 7297 ins_pipe(sdiv_reg_imm); 7298 %} 7299 7300 // Register Long Remainder 7301 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7302 effect(DEF dst, USE src1, USE src2); 7303 size(4); 7304 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7305 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7306 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7307 ins_pipe(divL_reg_reg); 7308 %} 7309 7310 // Register Long Division 7311 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7312 effect(DEF dst, USE src1, USE src2); 7313 size(4); 7314 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7315 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7316 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7317 ins_pipe(divL_reg_imm); 7318 %} 7319 7320 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7321 effect(DEF dst, USE src1, USE src2); 7322 size(4); 7323 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7324 opcode(Assembler::mulx_op3, Assembler::arith_op); 7325 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7326 ins_pipe(mulL_reg_reg); 7327 %} 7328 7329 // Immediate Multiplication 7330 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7331 effect(DEF dst, USE src1, USE src2); 7332 size(4); 7333 format %{ "MULX $src1,$src2,$dst" %} 7334 opcode(Assembler::mulx_op3, Assembler::arith_op); 7335 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7336 ins_pipe(mulL_reg_imm); 7337 %} 7338 7339 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7340 effect(DEF dst, USE src1, USE src2); 7341 size(4); 7342 format %{ "SUB $src1,$src2,$dst\t! long" %} 7343 opcode(Assembler::sub_op3, Assembler::arith_op); 7344 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7345 ins_pipe(ialu_reg_reg); 7346 %} 7347 7348 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 7349 effect(DEF dst, USE src1, USE src2); 7350 size(4); 7351 format %{ "SUB $src1,$src2,$dst\t! 
long" %} 7352 opcode(Assembler::sub_op3, Assembler::arith_op); 7353 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7354 ins_pipe(ialu_reg_reg); 7355 %} 7356 7357 // Register Long Remainder 7358 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7359 match(Set dst (ModL src1 src2)); 7360 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7361 expand %{ 7362 iRegL tmp1; 7363 iRegL tmp2; 7364 divL_reg_reg_1(tmp1, src1, src2); 7365 mulL_reg_reg_1(tmp2, tmp1, src2); 7366 subL_reg_reg_1(dst, src1, tmp2); 7367 %} 7368 %} 7369 7370 // Register Long Remainder 7371 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7372 match(Set dst (ModL src1 src2)); 7373 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7374 expand %{ 7375 iRegL tmp1; 7376 iRegL tmp2; 7377 divL_reg_imm13_1(tmp1, src1, src2); 7378 mulL_reg_imm13_1(tmp2, tmp1, src2); 7379 subL_reg_reg_2 (dst, src1, tmp2); 7380 %} 7381 %} 7382 7383 // Integer Shift Instructions 7384 // Register Shift Left 7385 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7386 match(Set dst (LShiftI src1 src2)); 7387 7388 size(4); 7389 format %{ "SLL $src1,$src2,$dst" %} 7390 opcode(Assembler::sll_op3, Assembler::arith_op); 7391 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7392 ins_pipe(ialu_reg_reg); 7393 %} 7394 7395 // Register Shift Left Immediate 7396 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7397 match(Set dst (LShiftI src1 src2)); 7398 7399 size(4); 7400 format %{ "SLL $src1,$src2,$dst" %} 7401 opcode(Assembler::sll_op3, Assembler::arith_op); 7402 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7403 ins_pipe(ialu_reg_imm); 7404 %} 7405 7406 // Register Shift Left 7407 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7408 match(Set dst (LShiftL src1 src2)); 7409 7410 size(4); 7411 format %{ "SLLX $src1,$src2,$dst" %} 7412 opcode(Assembler::sllx_op3, Assembler::arith_op); 7413 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7414 ins_pipe(ialu_reg_reg); 7415 %} 7416 7417 // 
Register Shift Left Immediate 7418 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7419 match(Set dst (LShiftL src1 src2)); 7420 7421 size(4); 7422 format %{ "SLLX $src1,$src2,$dst" %} 7423 opcode(Assembler::sllx_op3, Assembler::arith_op); 7424 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7425 ins_pipe(ialu_reg_imm); 7426 %} 7427 7428 // Register Arithmetic Shift Right 7429 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7430 match(Set dst (RShiftI src1 src2)); 7431 size(4); 7432 format %{ "SRA $src1,$src2,$dst" %} 7433 opcode(Assembler::sra_op3, Assembler::arith_op); 7434 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7435 ins_pipe(ialu_reg_reg); 7436 %} 7437 7438 // Register Arithmetic Shift Right Immediate 7439 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7440 match(Set dst (RShiftI src1 src2)); 7441 7442 size(4); 7443 format %{ "SRA $src1,$src2,$dst" %} 7444 opcode(Assembler::sra_op3, Assembler::arith_op); 7445 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7446 ins_pipe(ialu_reg_imm); 7447 %} 7448 7449 // Register Shift Right Arithmetic Long 7450 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7451 match(Set dst (RShiftL src1 src2)); 7452 7453 size(4); 7454 format %{ "SRAX $src1,$src2,$dst" %} 7455 opcode(Assembler::srax_op3, Assembler::arith_op); 7456 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7457 ins_pipe(ialu_reg_reg); 7458 %} 7459 7460 // Register Arithmetic Shift Right Long Immediate 7461 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7462 match(Set dst (RShiftL src1 src2)); 7463 7464 size(4); 7465 format %{ "SRAX $src1,$src2,$dst" %} 7466 opcode(Assembler::srax_op3, Assembler::arith_op); 7467 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7468 ins_pipe(ialu_reg_imm); 7469 %} 7470 7471 // Register Shift Right 7472 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7473 match(Set dst (URShiftI src1 src2)); 7474 7475 size(4); 7476 format %{ "SRL
$src1,$src2,$dst" %} 7477 opcode(Assembler::srl_op3, Assembler::arith_op); 7478 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7479 ins_pipe(ialu_reg_reg); 7480 %} 7481 7482 // Register Shift Right Immediate 7483 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7484 match(Set dst (URShiftI src1 src2)); 7485 7486 size(4); 7487 format %{ "SRL $src1,$src2,$dst" %} 7488 opcode(Assembler::srl_op3, Assembler::arith_op); 7489 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7490 ins_pipe(ialu_reg_imm); 7491 %} 7492 7493 // Register Shift Right 7494 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7495 match(Set dst (URShiftL src1 src2)); 7496 7497 size(4); 7498 format %{ "SRLX $src1,$src2,$dst" %} 7499 opcode(Assembler::srlx_op3, Assembler::arith_op); 7500 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7501 ins_pipe(ialu_reg_reg); 7502 %} 7503 7504 // Register Shift Right Immediate 7505 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7506 match(Set dst (URShiftL src1 src2)); 7507 7508 size(4); 7509 format %{ "SRLX $src1,$src2,$dst" %} 7510 opcode(Assembler::srlx_op3, Assembler::arith_op); 7511 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7512 ins_pipe(ialu_reg_imm); 7513 %} 7514 7515 // Register Shift Right Immediate with a CastP2X 7516 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ 7517 match(Set dst (URShiftL (CastP2X src1) src2)); 7518 size(4); 7519 format %{ "SRLX $src1,$src2,$dst\t! 
Cast ptr $src1 to long and shift" %} 7520 opcode(Assembler::srlx_op3, Assembler::arith_op); 7521 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7522 ins_pipe(ialu_reg_imm); 7523 %} 7524 7525 7526 //----------Floating Point Arithmetic Instructions----------------------------- 7527 7528 // Add float single precision 7529 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 7530 match(Set dst (AddF src1 src2)); 7531 7532 size(4); 7533 format %{ "FADDS $src1,$src2,$dst" %} 7534 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf); 7535 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7536 ins_pipe(faddF_reg_reg); 7537 %} 7538 7539 // Add float double precision 7540 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 7541 match(Set dst (AddD src1 src2)); 7542 7543 size(4); 7544 format %{ "FADDD $src1,$src2,$dst" %} 7545 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 7546 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7547 ins_pipe(faddD_reg_reg); 7548 %} 7549 7550 // Sub float single precision 7551 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 7552 match(Set dst (SubF src1 src2)); 7553 7554 size(4); 7555 format %{ "FSUBS $src1,$src2,$dst" %} 7556 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf); 7557 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7558 ins_pipe(faddF_reg_reg); 7559 %} 7560 7561 // Sub float double precision 7562 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 7563 match(Set dst (SubD src1 src2)); 7564 7565 size(4); 7566 format %{ "FSUBD $src1,$src2,$dst" %} 7567 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 7568 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7569 ins_pipe(faddD_reg_reg); 7570 %} 7571 7572 // Mul float single precision 7573 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 7574 match(Set dst (MulF src1 src2)); 7575 7576 size(4); 7577 format %{ "FMULS $src1,$src2,$dst" %} 7578 
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf); 7579 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7580 ins_pipe(fmulF_reg_reg); 7581 %} 7582 7583 // Mul float double precision 7584 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 7585 match(Set dst (MulD src1 src2)); 7586 7587 size(4); 7588 format %{ "FMULD $src1,$src2,$dst" %} 7589 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 7590 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7591 ins_pipe(fmulD_reg_reg); 7592 %} 7593 7594 // Div float single precision 7595 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 7596 match(Set dst (DivF src1 src2)); 7597 7598 size(4); 7599 format %{ "FDIVS $src1,$src2,$dst" %} 7600 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf); 7601 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7602 ins_pipe(fdivF_reg_reg); 7603 %} 7604 7605 // Div float double precision 7606 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 7607 match(Set dst (DivD src1 src2)); 7608 7609 size(4); 7610 format %{ "FDIVD $src1,$src2,$dst" %} 7611 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf); 7612 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7613 ins_pipe(fdivD_reg_reg); 7614 %} 7615 7616 // Absolute float double precision 7617 instruct absD_reg(regD dst, regD src) %{ 7618 match(Set dst (AbsD src)); 7619 7620 format %{ "FABSd $src,$dst" %} 7621 ins_encode(fabsd(dst, src)); 7622 ins_pipe(faddD_reg); 7623 %} 7624 7625 // Absolute float single precision 7626 instruct absF_reg(regF dst, regF src) %{ 7627 match(Set dst (AbsF src)); 7628 7629 format %{ "FABSs $src,$dst" %} 7630 ins_encode(fabss(dst, src)); 7631 ins_pipe(faddF_reg); 7632 %} 7633 7634 instruct negF_reg(regF dst, regF src) %{ 7635 match(Set dst (NegF src)); 7636 7637 size(4); 7638 format %{ "FNEGs $src,$dst" %} 7639 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); 7640 
ins_encode(form3_opf_rs2F_rdF(src, dst));
  ins_pipe(faddF_reg);
%}

instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));

  format %{ "FNEGd $src,$dst" %}
  ins_encode(fnegd(dst, src));
  ins_pipe(faddD_reg);
%}

// Sqrt float single precision
instruct sqrtF_reg_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  size(4);
  format %{ "FSQRTS $src,$dst" %}
  ins_encode(fsqrts(dst, src));
  ins_pipe(fdivF_reg_reg);
%}

// Sqrt float double precision
instruct sqrtD_reg_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));

  size(4);
  format %{ "FSQRTD $src,$dst" %}
  ins_encode(fsqrtd(dst, src));
  ins_pipe(fdivD_reg_reg);
%}

// Single/Double precision fused floating-point multiply-add (d = a * b + c).
instruct fmaF_regx4(regF dst, regF a, regF b, regF c) %{
  predicate(UseFMA);
  match(Set dst (FmaF c (Binary a b)));
  format %{ "fmadds $a,$b,$c,$dst\t# $dst = $a * $b + $c" %}
  ins_encode(fmadds(dst, a, b, c));
  ins_pipe(fmaF_regx4);
%}

instruct fmaD_regx4(regD dst, regD a, regD b, regD c) %{
  predicate(UseFMA);
  match(Set dst (FmaD c (Binary a b)));
  format %{ "fmaddd $a,$b,$c,$dst\t# $dst = $a * $b + $c" %}
  ins_encode(fmaddd(dst, a, b, c));
  ins_pipe(fmaD_regx4);
%}

// Additional patterns matching complement versions that we can map directly to
// variants of the fused multiply-add instructions.
7691 7692 // Single/Double precision fused floating-point multiply-sub (d = a * b - c) 7693 instruct fmsubF_regx4(regF dst, regF a, regF b, regF c) %{ 7694 predicate(UseFMA); 7695 match(Set dst (FmaF (NegF c) (Binary a b))); 7696 format %{ "fmsubs $a,$b,$c,$dst\t# $dst = $a * $b - $c" %} 7697 ins_encode(fmsubs(dst, a, b, c)); 7698 ins_pipe(fmaF_regx4); 7699 %} 7700 7701 instruct fmsubD_regx4(regD dst, regD a, regD b, regD c) %{ 7702 predicate(UseFMA); 7703 match(Set dst (FmaD (NegD c) (Binary a b))); 7704 format %{ "fmsubd $a,$b,$c,$dst\t# $dst = $a * $b - $c" %} 7705 ins_encode(fmsubd(dst, a, b, c)); 7706 ins_pipe(fmaD_regx4); 7707 %} 7708 7709 // Single/Double precision fused floating-point neg. multiply-add, 7710 // d = -1 * a * b - c = -(a * b + c) 7711 instruct fnmaddF_regx4(regF dst, regF a, regF b, regF c) %{ 7712 predicate(UseFMA); 7713 match(Set dst (FmaF (NegF c) (Binary (NegF a) b))); 7714 match(Set dst (FmaF (NegF c) (Binary a (NegF b)))); 7715 format %{ "fnmadds $a,$b,$c,$dst\t# $dst = -($a * $b + $c)" %} 7716 ins_encode(fnmadds(dst, a, b, c)); 7717 ins_pipe(fmaF_regx4); 7718 %} 7719 7720 instruct fnmaddD_regx4(regD dst, regD a, regD b, regD c) %{ 7721 predicate(UseFMA); 7722 match(Set dst (FmaD (NegD c) (Binary (NegD a) b))); 7723 match(Set dst (FmaD (NegD c) (Binary a (NegD b)))); 7724 format %{ "fnmaddd $a,$b,$c,$dst\t# $dst = -($a * $b + $c)" %} 7725 ins_encode(fnmaddd(dst, a, b, c)); 7726 ins_pipe(fmaD_regx4); 7727 %} 7728 7729 // Single/Double precision fused floating-point neg. 
multiply-sub, 7730 // d = -1 * a * b + c = -(a * b - c) 7731 instruct fnmsubF_regx4(regF dst, regF a, regF b, regF c) %{ 7732 predicate(UseFMA); 7733 match(Set dst (FmaF c (Binary (NegF a) b))); 7734 match(Set dst (FmaF c (Binary a (NegF b)))); 7735 format %{ "fnmsubs $a,$b,$c,$dst\t# $dst = -($a * $b - $c)" %} 7736 ins_encode(fnmsubs(dst, a, b, c)); 7737 ins_pipe(fmaF_regx4); 7738 %} 7739 7740 instruct fnmsubD_regx4(regD dst, regD a, regD b, regD c) %{ 7741 predicate(UseFMA); 7742 match(Set dst (FmaD c (Binary (NegD a) b))); 7743 match(Set dst (FmaD c (Binary a (NegD b)))); 7744 format %{ "fnmsubd $a,$b,$c,$dst\t# $dst = -($a * $b - $c)" %} 7745 ins_encode(fnmsubd(dst, a, b, c)); 7746 ins_pipe(fmaD_regx4); 7747 %} 7748 7749 //----------Logical Instructions----------------------------------------------- 7750 // And Instructions 7751 // Register And 7752 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7753 match(Set dst (AndI src1 src2)); 7754 7755 size(4); 7756 format %{ "AND $src1,$src2,$dst" %} 7757 opcode(Assembler::and_op3, Assembler::arith_op); 7758 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7759 ins_pipe(ialu_reg_reg); 7760 %} 7761 7762 // Immediate And 7763 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7764 match(Set dst (AndI src1 src2)); 7765 7766 size(4); 7767 format %{ "AND $src1,$src2,$dst" %} 7768 opcode(Assembler::and_op3, Assembler::arith_op); 7769 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7770 ins_pipe(ialu_reg_imm); 7771 %} 7772 7773 // Register And Long 7774 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7775 match(Set dst (AndL src1 src2)); 7776 7777 ins_cost(DEFAULT_COST); 7778 size(4); 7779 format %{ "AND $src1,$src2,$dst\t! 
long" %}
  opcode(Assembler::and_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate And Long
instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (AndL src1 con));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "AND $src1,$con,$dst\t! long" %}
  opcode(Assembler::and_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Or Instructions
// Register Or
instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "OR $src1,$src2,$dst" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Or
instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "OR $src1,$src2,$dst" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Or Long
instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "OR $src1,$src2,$dst\t! long" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Or Long
// NOTE(review): original declared ins_cost twice (DEFAULT_COST*2 then
// DEFAULT_COST); the first was dead and inconsistent with andL_reg_imm13
// and xorL_reg_imm13, so it has been removed.
instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (OrL src1 con));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "OR $src1,$con,$dst\t!
long" %} 7838 opcode(Assembler::or_op3, Assembler::arith_op); 7839 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7840 ins_pipe(ialu_reg_imm); 7841 %} 7842 7843 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ 7844 match(Set dst (OrL src1 (CastP2X src2))); 7845 7846 ins_cost(DEFAULT_COST); 7847 size(4); 7848 format %{ "OR $src1,$src2,$dst\t! long" %} 7849 opcode(Assembler::or_op3, Assembler::arith_op); 7850 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7851 ins_pipe(ialu_reg_reg); 7852 %} 7853 7854 // Xor Instructions 7855 // Register Xor 7856 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7857 match(Set dst (XorI src1 src2)); 7858 7859 size(4); 7860 format %{ "XOR $src1,$src2,$dst" %} 7861 opcode(Assembler::xor_op3, Assembler::arith_op); 7862 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7863 ins_pipe(ialu_reg_reg); 7864 %} 7865 7866 // Immediate Xor 7867 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7868 match(Set dst (XorI src1 src2)); 7869 7870 size(4); 7871 format %{ "XOR $src1,$src2,$dst" %} 7872 opcode(Assembler::xor_op3, Assembler::arith_op); 7873 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7874 ins_pipe(ialu_reg_imm); 7875 %} 7876 7877 // Register Xor Long 7878 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7879 match(Set dst (XorL src1 src2)); 7880 7881 ins_cost(DEFAULT_COST); 7882 size(4); 7883 format %{ "XOR $src1,$src2,$dst\t! long" %} 7884 opcode(Assembler::xor_op3, Assembler::arith_op); 7885 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7886 ins_pipe(ialu_reg_reg); 7887 %} 7888 7889 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7890 match(Set dst (XorL src1 con)); 7891 7892 ins_cost(DEFAULT_COST); 7893 size(4); 7894 format %{ "XOR $src1,$con,$dst\t! 
long" %} 7895 opcode(Assembler::xor_op3, Assembler::arith_op); 7896 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7897 ins_pipe(ialu_reg_imm); 7898 %} 7899 7900 //----------Convert to Boolean------------------------------------------------- 7901 // Nice hack for 32-bit tests but doesn't work for 7902 // 64-bit pointers. 7903 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{ 7904 match(Set dst (Conv2B src)); 7905 effect( KILL ccr ); 7906 ins_cost(DEFAULT_COST*2); 7907 format %{ "CMP R_G0,$src\n\t" 7908 "ADDX R_G0,0,$dst" %} 7909 ins_encode( enc_to_bool( src, dst ) ); 7910 ins_pipe(ialu_reg_ialu); 7911 %} 7912 7913 instruct convP2B( iRegI dst, iRegP src ) %{ 7914 match(Set dst (Conv2B src)); 7915 ins_cost(DEFAULT_COST*2); 7916 format %{ "MOV $src,$dst\n\t" 7917 "MOVRNZ $src,1,$dst" %} 7918 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); 7919 ins_pipe(ialu_clr_and_mover); 7920 %} 7921 7922 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ 7923 match(Set dst (CmpLTMask src zero)); 7924 effect(KILL ccr); 7925 size(4); 7926 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %} 7927 ins_encode %{ 7928 __ sra($src$$Register, 31, $dst$$Register); 7929 %} 7930 ins_pipe(ialu_reg_imm); 7931 %} 7932 7933 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{ 7934 match(Set dst (CmpLTMask p q)); 7935 effect( KILL ccr ); 7936 ins_cost(DEFAULT_COST*4); 7937 format %{ "CMP $p,$q\n\t" 7938 "MOV #0,$dst\n\t" 7939 "BLT,a .+8\n\t" 7940 "MOV #-1,$dst" %} 7941 ins_encode( enc_ltmask(p,q,dst) ); 7942 ins_pipe(ialu_reg_reg_ialu); 7943 %} 7944 7945 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ 7946 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 7947 effect(KILL ccr, TEMP tmp); 7948 ins_cost(DEFAULT_COST*3); 7949 7950 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" 7951 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" 7952 "MOVlt $tmp,$p\t! p' < 0 ? 
p'+y : p'" %} 7953 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); 7954 ins_pipe(cadd_cmpltmask); 7955 %} 7956 7957 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ 7958 match(Set p (AndI (CmpLTMask p q) y)); 7959 effect(KILL ccr); 7960 ins_cost(DEFAULT_COST*3); 7961 7962 format %{ "CMP $p,$q\n\t" 7963 "MOV $y,$p\n\t" 7964 "MOVge G0,$p" %} 7965 ins_encode %{ 7966 __ cmp($p$$Register, $q$$Register); 7967 __ mov($y$$Register, $p$$Register); 7968 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); 7969 %} 7970 ins_pipe(ialu_reg_reg_ialu); 7971 %} 7972 7973 //----------------------------------------------------------------- 7974 // Direct raw moves between float and general registers using VIS3. 7975 7976 // ins_pipe(faddF_reg); 7977 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{ 7978 predicate(UseVIS >= 3); 7979 match(Set dst (MoveF2I src)); 7980 7981 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %} 7982 ins_encode %{ 7983 __ movstouw($src$$FloatRegister, $dst$$Register); 7984 %} 7985 ins_pipe(ialu_reg_reg); 7986 %} 7987 7988 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{ 7989 predicate(UseVIS >= 3); 7990 match(Set dst (MoveI2F src)); 7991 7992 format %{ "MOVWTOS $src,$dst\t! MoveI2F" %} 7993 ins_encode %{ 7994 __ movwtos($src$$Register, $dst$$FloatRegister); 7995 %} 7996 ins_pipe(ialu_reg_reg); 7997 %} 7998 7999 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{ 8000 predicate(UseVIS >= 3); 8001 match(Set dst (MoveD2L src)); 8002 8003 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %} 8004 ins_encode %{ 8005 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register); 8006 %} 8007 ins_pipe(ialu_reg_reg); 8008 %} 8009 8010 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{ 8011 predicate(UseVIS >= 3); 8012 match(Set dst (MoveL2D src)); 8013 8014 format %{ "MOVXTOD $src,$dst\t! 
MoveL2D" %} 8015 ins_encode %{ 8016 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg)); 8017 %} 8018 ins_pipe(ialu_reg_reg); 8019 %} 8020 8021 8022 // Raw moves between float and general registers using stack. 8023 8024 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ 8025 match(Set dst (MoveF2I src)); 8026 effect(DEF dst, USE src); 8027 ins_cost(MEMORY_REF_COST); 8028 8029 format %{ "LDUW $src,$dst\t! MoveF2I" %} 8030 opcode(Assembler::lduw_op3); 8031 ins_encode(simple_form3_mem_reg( src, dst ) ); 8032 ins_pipe(iload_mem); 8033 %} 8034 8035 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 8036 match(Set dst (MoveI2F src)); 8037 effect(DEF dst, USE src); 8038 ins_cost(MEMORY_REF_COST); 8039 8040 format %{ "LDF $src,$dst\t! MoveI2F" %} 8041 opcode(Assembler::ldf_op3); 8042 ins_encode(simple_form3_mem_reg(src, dst)); 8043 ins_pipe(floadF_stk); 8044 %} 8045 8046 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{ 8047 match(Set dst (MoveD2L src)); 8048 effect(DEF dst, USE src); 8049 ins_cost(MEMORY_REF_COST); 8050 8051 format %{ "LDX $src,$dst\t! MoveD2L" %} 8052 opcode(Assembler::ldx_op3); 8053 ins_encode(simple_form3_mem_reg( src, dst ) ); 8054 ins_pipe(iload_mem); 8055 %} 8056 8057 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 8058 match(Set dst (MoveL2D src)); 8059 effect(DEF dst, USE src); 8060 ins_cost(MEMORY_REF_COST); 8061 8062 format %{ "LDDF $src,$dst\t! MoveL2D" %} 8063 opcode(Assembler::lddf_op3); 8064 ins_encode(simple_form3_mem_reg(src, dst)); 8065 ins_pipe(floadD_stk); 8066 %} 8067 8068 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 8069 match(Set dst (MoveF2I src)); 8070 effect(DEF dst, USE src); 8071 ins_cost(MEMORY_REF_COST); 8072 8073 format %{ "STF $src,$dst\t! 
MoveF2I" %} 8074 opcode(Assembler::stf_op3); 8075 ins_encode(simple_form3_mem_reg(dst, src)); 8076 ins_pipe(fstoreF_stk_reg); 8077 %} 8078 8079 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ 8080 match(Set dst (MoveI2F src)); 8081 effect(DEF dst, USE src); 8082 ins_cost(MEMORY_REF_COST); 8083 8084 format %{ "STW $src,$dst\t! MoveI2F" %} 8085 opcode(Assembler::stw_op3); 8086 ins_encode(simple_form3_mem_reg( dst, src ) ); 8087 ins_pipe(istore_mem_reg); 8088 %} 8089 8090 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 8091 match(Set dst (MoveD2L src)); 8092 effect(DEF dst, USE src); 8093 ins_cost(MEMORY_REF_COST); 8094 8095 format %{ "STDF $src,$dst\t! MoveD2L" %} 8096 opcode(Assembler::stdf_op3); 8097 ins_encode(simple_form3_mem_reg(dst, src)); 8098 ins_pipe(fstoreD_stk_reg); 8099 %} 8100 8101 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ 8102 match(Set dst (MoveL2D src)); 8103 effect(DEF dst, USE src); 8104 ins_cost(MEMORY_REF_COST); 8105 8106 format %{ "STX $src,$dst\t! MoveL2D" %} 8107 opcode(Assembler::stx_op3); 8108 ins_encode(simple_form3_mem_reg( dst, src ) ); 8109 ins_pipe(istore_mem_reg); 8110 %} 8111 8112 8113 //----------Arithmetic Conversion Instructions--------------------------------- 8114 // The conversions operations are all Alpha sorted. Please keep it that way! 8115 8116 instruct convD2F_reg(regF dst, regD src) %{ 8117 match(Set dst (ConvD2F src)); 8118 size(4); 8119 format %{ "FDTOS $src,$dst" %} 8120 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); 8121 ins_encode(form3_opf_rs2D_rdF(src, dst)); 8122 ins_pipe(fcvtD2F); 8123 %} 8124 8125 8126 // Convert a double to an int in a float register. 8127 // If the double is a NAN, stuff a zero in instead. 8128 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ 8129 effect(DEF dst, USE src, KILL fcc0); 8130 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8131 "FBO,pt fcc0,skip\t! 
branch on ordered, predict taken\n\t" 8132 "FDTOI $src,$dst\t! convert in delay slot\n\t" 8133 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8134 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8135 "skip:" %} 8136 ins_encode(form_d2i_helper(src,dst)); 8137 ins_pipe(fcvtD2I); 8138 %} 8139 8140 instruct convD2I_stk(stackSlotI dst, regD src) %{ 8141 match(Set dst (ConvD2I src)); 8142 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8143 expand %{ 8144 regF tmp; 8145 convD2I_helper(tmp, src); 8146 regF_to_stkI(dst, tmp); 8147 %} 8148 %} 8149 8150 instruct convD2I_reg(iRegI dst, regD src) %{ 8151 predicate(UseVIS >= 3); 8152 match(Set dst (ConvD2I src)); 8153 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8154 expand %{ 8155 regF tmp; 8156 convD2I_helper(tmp, src); 8157 MoveF2I_reg_reg(dst, tmp); 8158 %} 8159 %} 8160 8161 8162 // Convert a double to a long in a double register. 8163 // If the double is a NAN, stuff a zero in instead. 8164 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{ 8165 effect(DEF dst, USE src, KILL fcc0); 8166 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8167 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8168 "FDTOX $src,$dst\t! convert in delay slot\n\t" 8169 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8170 "FSUBd $dst,$dst,$dst\t! 
cleared only if nan\n" 8171 "skip:" %} 8172 ins_encode(form_d2l_helper(src,dst)); 8173 ins_pipe(fcvtD2L); 8174 %} 8175 8176 instruct convD2L_stk(stackSlotL dst, regD src) %{ 8177 match(Set dst (ConvD2L src)); 8178 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8179 expand %{ 8180 regD tmp; 8181 convD2L_helper(tmp, src); 8182 regD_to_stkL(dst, tmp); 8183 %} 8184 %} 8185 8186 instruct convD2L_reg(iRegL dst, regD src) %{ 8187 predicate(UseVIS >= 3); 8188 match(Set dst (ConvD2L src)); 8189 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8190 expand %{ 8191 regD tmp; 8192 convD2L_helper(tmp, src); 8193 MoveD2L_reg_reg(dst, tmp); 8194 %} 8195 %} 8196 8197 8198 instruct convF2D_reg(regD dst, regF src) %{ 8199 match(Set dst (ConvF2D src)); 8200 format %{ "FSTOD $src,$dst" %} 8201 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf); 8202 ins_encode(form3_opf_rs2F_rdD(src, dst)); 8203 ins_pipe(fcvtF2D); 8204 %} 8205 8206 8207 // Convert a float to an int in a float register. 8208 // If the float is a NAN, stuff a zero in instead. 8209 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{ 8210 effect(DEF dst, USE src, KILL fcc0); 8211 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8212 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8213 "FSTOI $src,$dst\t! convert in delay slot\n\t" 8214 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8215 "FSUBs $dst,$dst,$dst\t! 
cleared only if nan\n" 8216 "skip:" %} 8217 ins_encode(form_f2i_helper(src,dst)); 8218 ins_pipe(fcvtF2I); 8219 %} 8220 8221 instruct convF2I_stk(stackSlotI dst, regF src) %{ 8222 match(Set dst (ConvF2I src)); 8223 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8224 expand %{ 8225 regF tmp; 8226 convF2I_helper(tmp, src); 8227 regF_to_stkI(dst, tmp); 8228 %} 8229 %} 8230 8231 instruct convF2I_reg(iRegI dst, regF src) %{ 8232 predicate(UseVIS >= 3); 8233 match(Set dst (ConvF2I src)); 8234 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8235 expand %{ 8236 regF tmp; 8237 convF2I_helper(tmp, src); 8238 MoveF2I_reg_reg(dst, tmp); 8239 %} 8240 %} 8241 8242 8243 // Convert a float to a long in a float register. 8244 // If the float is a NAN, stuff a zero in instead. 8245 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{ 8246 effect(DEF dst, USE src, KILL fcc0); 8247 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8248 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8249 "FSTOX $src,$dst\t! convert in delay slot\n\t" 8250 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8251 "FSUBd $dst,$dst,$dst\t! 
cleared only if nan\n" 8252 "skip:" %} 8253 ins_encode(form_f2l_helper(src,dst)); 8254 ins_pipe(fcvtF2L); 8255 %} 8256 8257 instruct convF2L_stk(stackSlotL dst, regF src) %{ 8258 match(Set dst (ConvF2L src)); 8259 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8260 expand %{ 8261 regD tmp; 8262 convF2L_helper(tmp, src); 8263 regD_to_stkL(dst, tmp); 8264 %} 8265 %} 8266 8267 instruct convF2L_reg(iRegL dst, regF src) %{ 8268 predicate(UseVIS >= 3); 8269 match(Set dst (ConvF2L src)); 8270 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8271 expand %{ 8272 regD tmp; 8273 convF2L_helper(tmp, src); 8274 MoveD2L_reg_reg(dst, tmp); 8275 %} 8276 %} 8277 8278 8279 instruct convI2D_helper(regD dst, regF tmp) %{ 8280 effect(USE tmp, DEF dst); 8281 format %{ "FITOD $tmp,$dst" %} 8282 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8283 ins_encode(form3_opf_rs2F_rdD(tmp, dst)); 8284 ins_pipe(fcvtI2D); 8285 %} 8286 8287 instruct convI2D_stk(stackSlotI src, regD dst) %{ 8288 match(Set dst (ConvI2D src)); 8289 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8290 expand %{ 8291 regF tmp; 8292 stkI_to_regF(tmp, src); 8293 convI2D_helper(dst, tmp); 8294 %} 8295 %} 8296 8297 instruct convI2D_reg(regD_low dst, iRegI src) %{ 8298 predicate(UseVIS >= 3); 8299 match(Set dst (ConvI2D src)); 8300 expand %{ 8301 regF tmp; 8302 MoveI2F_reg_reg(tmp, src); 8303 convI2D_helper(dst, tmp); 8304 %} 8305 %} 8306 8307 instruct convI2D_mem(regD_low dst, memory mem) %{ 8308 match(Set dst (ConvI2D (LoadI mem))); 8309 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8310 format %{ "LDF $mem,$dst\n\t" 8311 "FITOD $dst,$dst" %} 8312 opcode(Assembler::ldf_op3, Assembler::fitod_opf); 8313 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8314 ins_pipe(floadF_mem); 8315 %} 8316 8317 8318 instruct convI2F_helper(regF dst, regF tmp) %{ 8319 effect(DEF dst, USE tmp); 8320 format %{ "FITOS $tmp,$dst" %} 8321 opcode(Assembler::fpop1_op3, Assembler::arith_op, 
Assembler::fitos_opf); 8322 ins_encode(form3_opf_rs2F_rdF(tmp, dst)); 8323 ins_pipe(fcvtI2F); 8324 %} 8325 8326 instruct convI2F_stk(regF dst, stackSlotI src) %{ 8327 match(Set dst (ConvI2F src)); 8328 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8329 expand %{ 8330 regF tmp; 8331 stkI_to_regF(tmp,src); 8332 convI2F_helper(dst, tmp); 8333 %} 8334 %} 8335 8336 instruct convI2F_reg(regF dst, iRegI src) %{ 8337 predicate(UseVIS >= 3); 8338 match(Set dst (ConvI2F src)); 8339 ins_cost(DEFAULT_COST); 8340 expand %{ 8341 regF tmp; 8342 MoveI2F_reg_reg(tmp, src); 8343 convI2F_helper(dst, tmp); 8344 %} 8345 %} 8346 8347 instruct convI2F_mem( regF dst, memory mem ) %{ 8348 match(Set dst (ConvI2F (LoadI mem))); 8349 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8350 format %{ "LDF $mem,$dst\n\t" 8351 "FITOS $dst,$dst" %} 8352 opcode(Assembler::ldf_op3, Assembler::fitos_opf); 8353 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8354 ins_pipe(floadF_mem); 8355 %} 8356 8357 8358 instruct convI2L_reg(iRegL dst, iRegI src) %{ 8359 match(Set dst (ConvI2L src)); 8360 size(4); 8361 format %{ "SRA $src,0,$dst\t! int->long" %} 8362 opcode(Assembler::sra_op3, Assembler::arith_op); 8363 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8364 ins_pipe(ialu_reg_reg); 8365 %} 8366 8367 // Zero-extend convert int to long 8368 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{ 8369 match(Set dst (AndL (ConvI2L src) mask) ); 8370 size(4); 8371 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %} 8372 opcode(Assembler::srl_op3, Assembler::arith_op); 8373 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8374 ins_pipe(ialu_reg_reg); 8375 %} 8376 8377 // Zero-extend long 8378 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{ 8379 match(Set dst (AndL src mask) ); 8380 size(4); 8381 format %{ "SRL $src,0,$dst\t! 
zero-extend long" %} 8382 opcode(Assembler::srl_op3, Assembler::arith_op); 8383 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8384 ins_pipe(ialu_reg_reg); 8385 %} 8386 8387 8388 //----------- 8389 // Long to Double conversion using V8 opcodes. 8390 // Still useful because cheetah traps and becomes 8391 // amazingly slow for some common numbers. 8392 8393 // Magic constant, 0x43300000 8394 instruct loadConI_x43300000(iRegI dst) %{ 8395 effect(DEF dst); 8396 size(4); 8397 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %} 8398 ins_encode(SetHi22(0x43300000, dst)); 8399 ins_pipe(ialu_none); 8400 %} 8401 8402 // Magic constant, 0x41f00000 8403 instruct loadConI_x41f00000(iRegI dst) %{ 8404 effect(DEF dst); 8405 size(4); 8406 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %} 8407 ins_encode(SetHi22(0x41f00000, dst)); 8408 ins_pipe(ialu_none); 8409 %} 8410 8411 // Construct a double from two float halves 8412 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{ 8413 effect(DEF dst, USE src1, USE src2); 8414 size(8); 8415 format %{ "FMOVS $src1.hi,$dst.hi\n\t" 8416 "FMOVS $src2.lo,$dst.lo" %} 8417 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf); 8418 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst)); 8419 ins_pipe(faddD_reg_reg); 8420 %} 8421 8422 // Convert integer in high half of a double register (in the lower half of 8423 // the double register file) to double 8424 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{ 8425 effect(DEF dst, USE src); 8426 size(4); 8427 format %{ "FITOD $src,$dst" %} 8428 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8429 ins_encode(form3_opf_rs2D_rdD(src, dst)); 8430 ins_pipe(fcvtLHi2D); 8431 %} 8432 8433 // Add float double precision 8434 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{ 8435 effect(DEF dst, USE src1, USE src2); 8436 size(4); 8437 format %{ "FADDD $src1,$src2,$dst" %} 8438 
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 8439 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8440 ins_pipe(faddD_reg_reg); 8441 %} 8442 8443 // Sub float double precision 8444 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{ 8445 effect(DEF dst, USE src1, USE src2); 8446 size(4); 8447 format %{ "FSUBD $src1,$src2,$dst" %} 8448 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 8449 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8450 ins_pipe(faddD_reg_reg); 8451 %} 8452 8453 // Mul float double precision 8454 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{ 8455 effect(DEF dst, USE src1, USE src2); 8456 size(4); 8457 format %{ "FMULD $src1,$src2,$dst" %} 8458 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 8459 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8460 ins_pipe(fmulD_reg_reg); 8461 %} 8462 8463 // Long to Double conversion using fast fxtof 8464 instruct convL2D_helper(regD dst, regD tmp) %{ 8465 effect(DEF dst, USE tmp); 8466 size(4); 8467 format %{ "FXTOD $tmp,$dst" %} 8468 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf); 8469 ins_encode(form3_opf_rs2D_rdD(tmp, dst)); 8470 ins_pipe(fcvtL2D); 8471 %} 8472 8473 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{ 8474 match(Set dst (ConvL2D src)); 8475 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST); 8476 expand %{ 8477 regD tmp; 8478 stkL_to_regD(tmp, src); 8479 convL2D_helper(dst, tmp); 8480 %} 8481 %} 8482 8483 instruct convL2D_reg(regD dst, iRegL src) %{ 8484 predicate(UseVIS >= 3); 8485 match(Set dst (ConvL2D src)); 8486 expand %{ 8487 regD tmp; 8488 MoveL2D_reg_reg(tmp, src); 8489 convL2D_helper(dst, tmp); 8490 %} 8491 %} 8492 8493 // Long to Float conversion using fast fxtof 8494 instruct convL2F_helper(regF dst, regD tmp) %{ 8495 effect(DEF dst, USE tmp); 8496 size(4); 8497 format %{ "FXTOS $tmp,$dst" %} 8498 opcode(Assembler::fpop1_op3, 
Assembler::arith_op, Assembler::fxtos_opf); 8499 ins_encode(form3_opf_rs2D_rdF(tmp, dst)); 8500 ins_pipe(fcvtL2F); 8501 %} 8502 8503 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{ 8504 match(Set dst (ConvL2F src)); 8505 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8506 expand %{ 8507 regD tmp; 8508 stkL_to_regD(tmp, src); 8509 convL2F_helper(dst, tmp); 8510 %} 8511 %} 8512 8513 instruct convL2F_reg(regF dst, iRegL src) %{ 8514 predicate(UseVIS >= 3); 8515 match(Set dst (ConvL2F src)); 8516 ins_cost(DEFAULT_COST); 8517 expand %{ 8518 regD tmp; 8519 MoveL2D_reg_reg(tmp, src); 8520 convL2F_helper(dst, tmp); 8521 %} 8522 %} 8523 8524 //----------- 8525 8526 instruct convL2I_reg(iRegI dst, iRegL src) %{ 8527 match(Set dst (ConvL2I src)); 8528 size(4); 8529 format %{ "SRA $src,R_G0,$dst\t! long->int" %} 8530 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) ); 8531 ins_pipe(ialu_reg); 8532 %} 8533 8534 // Register Shift Right Immediate 8535 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{ 8536 match(Set dst (ConvL2I (RShiftL src cnt))); 8537 8538 size(4); 8539 format %{ "SRAX $src,$cnt,$dst" %} 8540 opcode(Assembler::srax_op3, Assembler::arith_op); 8541 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) ); 8542 ins_pipe(ialu_reg_imm); 8543 %} 8544 8545 //----------Control Flow Instructions------------------------------------------ 8546 // Compare Instructions 8547 // Compare Integers 8548 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{ 8549 match(Set icc (CmpI op1 op2)); 8550 effect( DEF icc, USE op1, USE op2 ); 8551 8552 size(4); 8553 format %{ "CMP $op1,$op2" %} 8554 opcode(Assembler::subcc_op3, Assembler::arith_op); 8555 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8556 ins_pipe(ialu_cconly_reg_reg); 8557 %} 8558 8559 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{ 8560 match(Set icc (CmpU op1 op2)); 8561 8562 size(4); 8563 format %{ "CMP $op1,$op2\t! 
unsigned" %} 8564 opcode(Assembler::subcc_op3, Assembler::arith_op); 8565 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8566 ins_pipe(ialu_cconly_reg_reg); 8567 %} 8568 8569 instruct compUL_iReg(flagsRegUL xcc, iRegL op1, iRegL op2) %{ 8570 match(Set xcc (CmpUL op1 op2)); 8571 effect(DEF xcc, USE op1, USE op2); 8572 8573 size(4); 8574 format %{ "CMP $op1,$op2\t! unsigned long" %} 8575 opcode(Assembler::subcc_op3, Assembler::arith_op); 8576 ins_encode(form3_rs1_rs2_rd(op1, op2, R_G0)); 8577 ins_pipe(ialu_cconly_reg_reg); 8578 %} 8579 8580 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{ 8581 match(Set icc (CmpI op1 op2)); 8582 effect( DEF icc, USE op1 ); 8583 8584 size(4); 8585 format %{ "CMP $op1,$op2" %} 8586 opcode(Assembler::subcc_op3, Assembler::arith_op); 8587 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8588 ins_pipe(ialu_cconly_reg_imm); 8589 %} 8590 8591 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{ 8592 match(Set icc (CmpI (AndI op1 op2) zero)); 8593 8594 size(4); 8595 format %{ "BTST $op2,$op1" %} 8596 opcode(Assembler::andcc_op3, Assembler::arith_op); 8597 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8598 ins_pipe(ialu_cconly_reg_reg_zero); 8599 %} 8600 8601 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{ 8602 match(Set icc (CmpI (AndI op1 op2) zero)); 8603 8604 size(4); 8605 format %{ "BTST $op2,$op1" %} 8606 opcode(Assembler::andcc_op3, Assembler::arith_op); 8607 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8608 ins_pipe(ialu_cconly_reg_imm_zero); 8609 %} 8610 8611 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{ 8612 match(Set xcc (CmpL op1 op2)); 8613 effect( DEF xcc, USE op1, USE op2 ); 8614 8615 size(4); 8616 format %{ "CMP $op1,$op2\t\t! 
long" %} 8617 opcode(Assembler::subcc_op3, Assembler::arith_op); 8618 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8619 ins_pipe(ialu_cconly_reg_reg); 8620 %} 8621 8622 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{ 8623 match(Set xcc (CmpL op1 con)); 8624 effect( DEF xcc, USE op1, USE con ); 8625 8626 size(4); 8627 format %{ "CMP $op1,$con\t\t! long" %} 8628 opcode(Assembler::subcc_op3, Assembler::arith_op); 8629 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 8630 ins_pipe(ialu_cconly_reg_reg); 8631 %} 8632 8633 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ 8634 match(Set xcc (CmpL (AndL op1 op2) zero)); 8635 effect( DEF xcc, USE op1, USE op2 ); 8636 8637 size(4); 8638 format %{ "BTST $op1,$op2\t\t! long" %} 8639 opcode(Assembler::andcc_op3, Assembler::arith_op); 8640 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8641 ins_pipe(ialu_cconly_reg_reg); 8642 %} 8643 8644 // useful for checking the alignment of a pointer: 8645 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{ 8646 match(Set xcc (CmpL (AndL op1 con) zero)); 8647 effect( DEF xcc, USE op1, USE con ); 8648 8649 size(4); 8650 format %{ "BTST $op1,$con\t\t! long" %} 8651 opcode(Assembler::andcc_op3, Assembler::arith_op); 8652 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 8653 ins_pipe(ialu_cconly_reg_reg); 8654 %} 8655 8656 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU12 op2 ) %{ 8657 match(Set icc (CmpU op1 op2)); 8658 8659 size(4); 8660 format %{ "CMP $op1,$op2\t! unsigned" %} 8661 opcode(Assembler::subcc_op3, Assembler::arith_op); 8662 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8663 ins_pipe(ialu_cconly_reg_imm); 8664 %} 8665 8666 instruct compUL_iReg_imm13(flagsRegUL xcc, iRegL op1, immUL12 op2) %{ 8667 match(Set xcc (CmpUL op1 op2)); 8668 effect(DEF xcc, USE op1, USE op2); 8669 8670 size(4); 8671 format %{ "CMP $op1,$op2\t! 
unsigned long" %} 8672 opcode(Assembler::subcc_op3, Assembler::arith_op); 8673 ins_encode(form3_rs1_simm13_rd(op1, op2, R_G0)); 8674 ins_pipe(ialu_cconly_reg_imm); 8675 %} 8676 8677 // Compare Pointers 8678 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{ 8679 match(Set pcc (CmpP op1 op2)); 8680 8681 size(4); 8682 format %{ "CMP $op1,$op2\t! ptr" %} 8683 opcode(Assembler::subcc_op3, Assembler::arith_op); 8684 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8685 ins_pipe(ialu_cconly_reg_reg); 8686 %} 8687 8688 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{ 8689 match(Set pcc (CmpP op1 op2)); 8690 8691 size(4); 8692 format %{ "CMP $op1,$op2\t! ptr" %} 8693 opcode(Assembler::subcc_op3, Assembler::arith_op); 8694 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8695 ins_pipe(ialu_cconly_reg_imm); 8696 %} 8697 8698 // Compare Narrow oops 8699 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{ 8700 match(Set icc (CmpN op1 op2)); 8701 8702 size(4); 8703 format %{ "CMP $op1,$op2\t! compressed ptr" %} 8704 opcode(Assembler::subcc_op3, Assembler::arith_op); 8705 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8706 ins_pipe(ialu_cconly_reg_reg); 8707 %} 8708 8709 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{ 8710 match(Set icc (CmpN op1 op2)); 8711 8712 size(4); 8713 format %{ "CMP $op1,$op2\t! compressed ptr" %} 8714 opcode(Assembler::subcc_op3, Assembler::arith_op); 8715 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8716 ins_pipe(ialu_cconly_reg_imm); 8717 %} 8718 8719 //----------Max and Min-------------------------------------------------------- 8720 // Min Instructions 8721 // Conditional move for min 8722 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{ 8723 effect( USE_DEF op2, USE op1, USE icc ); 8724 8725 size(4); 8726 format %{ "MOVlt icc,$op1,$op2\t! 
min" %} 8727 opcode(Assembler::less); 8728 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 8729 ins_pipe(ialu_reg_flags); 8730 %} 8731 8732 // Min Register with Register. 8733 instruct minI_eReg(iRegI op1, iRegI op2) %{ 8734 match(Set op2 (MinI op1 op2)); 8735 ins_cost(DEFAULT_COST*2); 8736 expand %{ 8737 flagsReg icc; 8738 compI_iReg(icc,op1,op2); 8739 cmovI_reg_lt(op2,op1,icc); 8740 %} 8741 %} 8742 8743 // Max Instructions 8744 // Conditional move for max 8745 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{ 8746 effect( USE_DEF op2, USE op1, USE icc ); 8747 format %{ "MOVgt icc,$op1,$op2\t! max" %} 8748 opcode(Assembler::greater); 8749 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 8750 ins_pipe(ialu_reg_flags); 8751 %} 8752 8753 // Max Register with Register 8754 instruct maxI_eReg(iRegI op1, iRegI op2) %{ 8755 match(Set op2 (MaxI op1 op2)); 8756 ins_cost(DEFAULT_COST*2); 8757 expand %{ 8758 flagsReg icc; 8759 compI_iReg(icc,op1,op2); 8760 cmovI_reg_gt(op2,op1,icc); 8761 %} 8762 %} 8763 8764 8765 //----------Float Compares---------------------------------------------------- 8766 // Compare floating, generate condition code 8767 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{ 8768 match(Set fcc (CmpF src1 src2)); 8769 8770 size(4); 8771 format %{ "FCMPs $fcc,$src1,$src2" %} 8772 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); 8773 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) ); 8774 ins_pipe(faddF_fcc_reg_reg_zero); 8775 %} 8776 8777 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{ 8778 match(Set fcc (CmpD src1 src2)); 8779 8780 size(4); 8781 format %{ "FCMPd $fcc,$src1,$src2" %} 8782 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); 8783 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) ); 8784 ins_pipe(faddD_fcc_reg_reg_zero); 8785 %} 8786 8787 8788 // Compare floating, generate -1,0,1 8789 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{ 8790 match(Set 
dst (CmpF3 src1 src2)); 8791 effect(KILL fcc0); 8792 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 8793 format %{ "fcmpl $dst,$src1,$src2" %} 8794 // Primary = float 8795 opcode( true ); 8796 ins_encode( floating_cmp( dst, src1, src2 ) ); 8797 ins_pipe( floating_cmp ); 8798 %} 8799 8800 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{ 8801 match(Set dst (CmpD3 src1 src2)); 8802 effect(KILL fcc0); 8803 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 8804 format %{ "dcmpl $dst,$src1,$src2" %} 8805 // Primary = double (not float) 8806 opcode( false ); 8807 ins_encode( floating_cmp( dst, src1, src2 ) ); 8808 ins_pipe( floating_cmp ); 8809 %} 8810 8811 //----------Branches--------------------------------------------------------- 8812 // Jump 8813 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above) 8814 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{ 8815 match(Jump switch_val); 8816 effect(TEMP table); 8817 8818 ins_cost(350); 8819 8820 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t" 8821 "LD [O7 + $switch_val], O7\n\t" 8822 "JUMP O7" %} 8823 ins_encode %{ 8824 // Calculate table address into a register. 8825 Register table_reg; 8826 Register label_reg = O7; 8827 // If we are calculating the size of this instruction don't trust 8828 // zero offsets because they might change when 8829 // MachConstantBaseNode decides to optimize the constant table 8830 // base. 8831 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) { 8832 table_reg = $constanttablebase; 8833 } else { 8834 table_reg = O7; 8835 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7); 8836 __ add($constanttablebase, con_offset, table_reg); 8837 } 8838 8839 // Jump to base address + switch value 8840 __ ld_ptr(table_reg, $switch_val$$Register, label_reg); 8841 __ jmp(label_reg, G0); 8842 __ delayed()->nop(); 8843 %} 8844 ins_pipe(ialu_reg_reg); 8845 %} 8846 8847 // Direct Branch. Use V8 version with longer range. 
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BA $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    __ ba(*L);
    __ delayed()->nop();
  %}
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(br);
%}

// Direct Branch, short with no delay slot
instruct branch_short(label labl) %{
  match(Goto);
  predicate(UseCBCond);
  effect(USE labl);

  size(4); // Assuming no NOP inserted.
  ins_cost(BRANCH_COST);
  format %{ "BA $labl\t! short branch" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ ba_short(*L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER);
  ins_pipe(cbcond_reg_imm);
%}

// Conditional Direct Branch
instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $icc,$labl" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(br_cc);
%}

instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  // enc_bp emits a BPcc plus a delay-slot NOP (8 bytes), exactly like
  // branchCon/branchConP/branchLoopEnd(U) which all declare size(8);
  // declare it here as well for consistency across the family.
  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $icc,$labl" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(br_cc);
%}

instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
  match(If cmp pcc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $pcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ?
Assembler::pt : Assembler::pn; 8920 8921 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 8922 __ delayed()->nop(); 8923 %} 8924 ins_avoid_back_to_back(AVOID_BEFORE); 8925 ins_pipe(br_cc); 8926 %} 8927 8928 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{ 8929 match(If cmp fcc); 8930 effect(USE labl); 8931 8932 size(8); 8933 ins_cost(BRANCH_COST); 8934 format %{ "FBP$cmp $fcc,$labl" %} 8935 ins_encode %{ 8936 Label* L = $labl$$label; 8937 Assembler::Predict predict_taken = 8938 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 8939 8940 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L); 8941 __ delayed()->nop(); 8942 %} 8943 ins_avoid_back_to_back(AVOID_BEFORE); 8944 ins_pipe(br_fcc); 8945 %} 8946 8947 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{ 8948 match(CountedLoopEnd cmp icc); 8949 effect(USE labl); 8950 8951 size(8); 8952 ins_cost(BRANCH_COST); 8953 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 8954 // Prim = bits 24-22, Secnd = bits 31-30 8955 ins_encode( enc_bp( labl, cmp, icc ) ); 8956 ins_avoid_back_to_back(AVOID_BEFORE); 8957 ins_pipe(br_cc); 8958 %} 8959 8960 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{ 8961 match(CountedLoopEnd cmp icc); 8962 effect(USE labl); 8963 8964 size(8); 8965 ins_cost(BRANCH_COST); 8966 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 8967 // Prim = bits 24-22, Secnd = bits 31-30 8968 ins_encode( enc_bp( labl, cmp, icc ) ); 8969 ins_avoid_back_to_back(AVOID_BEFORE); 8970 ins_pipe(br_cc); 8971 %} 8972 8973 // Compare and branch instructions 8974 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 8975 match(If cmp (CmpI op1 op2)); 8976 effect(USE labl, KILL icc); 8977 8978 size(12); 8979 ins_cost(BRANCH_COST); 8980 format %{ "CMP $op1,$op2\t! 
int\n\t" 8981 "BP$cmp $labl" %} 8982 ins_encode %{ 8983 Label* L = $labl$$label; 8984 Assembler::Predict predict_taken = 8985 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 8986 __ cmp($op1$$Register, $op2$$Register); 8987 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 8988 __ delayed()->nop(); 8989 %} 8990 ins_pipe(cmp_br_reg_reg); 8991 %} 8992 8993 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 8994 match(If cmp (CmpI op1 op2)); 8995 effect(USE labl, KILL icc); 8996 8997 size(12); 8998 ins_cost(BRANCH_COST); 8999 format %{ "CMP $op1,$op2\t! int\n\t" 9000 "BP$cmp $labl" %} 9001 ins_encode %{ 9002 Label* L = $labl$$label; 9003 Assembler::Predict predict_taken = 9004 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9005 __ cmp($op1$$Register, $op2$$constant); 9006 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9007 __ delayed()->nop(); 9008 %} 9009 ins_pipe(cmp_br_reg_imm); 9010 %} 9011 9012 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9013 match(If cmp (CmpU op1 op2)); 9014 effect(USE labl, KILL icc); 9015 9016 size(12); 9017 ins_cost(BRANCH_COST); 9018 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9019 "BP$cmp $labl" %} 9020 ins_encode %{ 9021 Label* L = $labl$$label; 9022 Assembler::Predict predict_taken = 9023 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9024 __ cmp($op1$$Register, $op2$$Register); 9025 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9026 __ delayed()->nop(); 9027 %} 9028 ins_pipe(cmp_br_reg_reg); 9029 %} 9030 9031 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9032 match(If cmp (CmpU op1 op2)); 9033 effect(USE labl, KILL icc); 9034 9035 size(12); 9036 ins_cost(BRANCH_COST); 9037 format %{ "CMP $op1,$op2\t! 
unsigned\n\t" 9038 "BP$cmp $labl" %} 9039 ins_encode %{ 9040 Label* L = $labl$$label; 9041 Assembler::Predict predict_taken = 9042 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9043 __ cmp($op1$$Register, $op2$$constant); 9044 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9045 __ delayed()->nop(); 9046 %} 9047 ins_pipe(cmp_br_reg_imm); 9048 %} 9049 9050 instruct cmpUL_reg_branch(cmpOpU cmp, iRegL op1, iRegL op2, label labl, flagsRegUL xcc) %{ 9051 match(If cmp (CmpUL op1 op2)); 9052 effect(USE labl, KILL xcc); 9053 9054 size(12); 9055 ins_cost(BRANCH_COST); 9056 format %{ "CMP $op1,$op2\t! unsigned long\n\t" 9057 "BP$cmp $labl" %} 9058 ins_encode %{ 9059 Label* L = $labl$$label; 9060 Assembler::Predict predict_taken = 9061 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9062 __ cmp($op1$$Register, $op2$$Register); 9063 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9064 __ delayed()->nop(); 9065 %} 9066 ins_pipe(cmp_br_reg_reg); 9067 %} 9068 9069 instruct cmpUL_imm_branch(cmpOpU cmp, iRegL op1, immL5 op2, label labl, flagsRegUL xcc) %{ 9070 match(If cmp (CmpUL op1 op2)); 9071 effect(USE labl, KILL xcc); 9072 9073 size(12); 9074 ins_cost(BRANCH_COST); 9075 format %{ "CMP $op1,$op2\t! unsigned long\n\t" 9076 "BP$cmp $labl" %} 9077 ins_encode %{ 9078 Label* L = $labl$$label; 9079 Assembler::Predict predict_taken = 9080 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9081 __ cmp($op1$$Register, $op2$$constant); 9082 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9083 __ delayed()->nop(); 9084 %} 9085 ins_pipe(cmp_br_reg_imm); 9086 %} 9087 9088 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9089 match(If cmp (CmpL op1 op2)); 9090 effect(USE labl, KILL xcc); 9091 9092 size(12); 9093 ins_cost(BRANCH_COST); 9094 format %{ "CMP $op1,$op2\t! 
long\n\t" 9095 "BP$cmp $labl" %} 9096 ins_encode %{ 9097 Label* L = $labl$$label; 9098 Assembler::Predict predict_taken = 9099 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9100 __ cmp($op1$$Register, $op2$$Register); 9101 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9102 __ delayed()->nop(); 9103 %} 9104 ins_pipe(cmp_br_reg_reg); 9105 %} 9106 9107 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9108 match(If cmp (CmpL op1 op2)); 9109 effect(USE labl, KILL xcc); 9110 9111 size(12); 9112 ins_cost(BRANCH_COST); 9113 format %{ "CMP $op1,$op2\t! long\n\t" 9114 "BP$cmp $labl" %} 9115 ins_encode %{ 9116 Label* L = $labl$$label; 9117 Assembler::Predict predict_taken = 9118 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9119 __ cmp($op1$$Register, $op2$$constant); 9120 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9121 __ delayed()->nop(); 9122 %} 9123 ins_pipe(cmp_br_reg_imm); 9124 %} 9125 9126 // Compare Pointers and branch 9127 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9128 match(If cmp (CmpP op1 op2)); 9129 effect(USE labl, KILL pcc); 9130 9131 size(12); 9132 ins_cost(BRANCH_COST); 9133 format %{ "CMP $op1,$op2\t! ptr\n\t" 9134 "B$cmp $labl" %} 9135 ins_encode %{ 9136 Label* L = $labl$$label; 9137 Assembler::Predict predict_taken = 9138 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9139 __ cmp($op1$$Register, $op2$$Register); 9140 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9141 __ delayed()->nop(); 9142 %} 9143 ins_pipe(cmp_br_reg_reg); 9144 %} 9145 9146 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9147 match(If cmp (CmpP op1 null)); 9148 effect(USE labl, KILL pcc); 9149 9150 size(12); 9151 ins_cost(BRANCH_COST); 9152 format %{ "CMP $op1,0\t! 
ptr\n\t" 9153 "B$cmp $labl" %} 9154 ins_encode %{ 9155 Label* L = $labl$$label; 9156 Assembler::Predict predict_taken = 9157 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9158 __ cmp($op1$$Register, G0); 9159 // bpr() is not used here since it has shorter distance. 9160 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9161 __ delayed()->nop(); 9162 %} 9163 ins_pipe(cmp_br_reg_reg); 9164 %} 9165 9166 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9167 match(If cmp (CmpN op1 op2)); 9168 effect(USE labl, KILL icc); 9169 9170 size(12); 9171 ins_cost(BRANCH_COST); 9172 format %{ "CMP $op1,$op2\t! compressed ptr\n\t" 9173 "BP$cmp $labl" %} 9174 ins_encode %{ 9175 Label* L = $labl$$label; 9176 Assembler::Predict predict_taken = 9177 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9178 __ cmp($op1$$Register, $op2$$Register); 9179 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9180 __ delayed()->nop(); 9181 %} 9182 ins_pipe(cmp_br_reg_reg); 9183 %} 9184 9185 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9186 match(If cmp (CmpN op1 null)); 9187 effect(USE labl, KILL icc); 9188 9189 size(12); 9190 ins_cost(BRANCH_COST); 9191 format %{ "CMP $op1,0\t! compressed ptr\n\t" 9192 "BP$cmp $labl" %} 9193 ins_encode %{ 9194 Label* L = $labl$$label; 9195 Assembler::Predict predict_taken = 9196 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9197 __ cmp($op1$$Register, G0); 9198 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9199 __ delayed()->nop(); 9200 %} 9201 ins_pipe(cmp_br_reg_reg); 9202 %} 9203 9204 // Loop back branch 9205 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9206 match(CountedLoopEnd cmp (CmpI op1 op2)); 9207 effect(USE labl, KILL icc); 9208 9209 size(12); 9210 ins_cost(BRANCH_COST); 9211 format %{ "CMP $op1,$op2\t! int\n\t" 9212 "BP$cmp $labl\t! Loop end" %} 9213 ins_encode %{ 9214 Label* L = $labl$$label; 9215 Assembler::Predict predict_taken = 9216 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9217 __ cmp($op1$$Register, $op2$$Register); 9218 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9219 __ delayed()->nop(); 9220 %} 9221 ins_pipe(cmp_br_reg_reg); 9222 %} 9223 9224 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9225 match(CountedLoopEnd cmp (CmpI op1 op2)); 9226 effect(USE labl, KILL icc); 9227 9228 size(12); 9229 ins_cost(BRANCH_COST); 9230 format %{ "CMP $op1,$op2\t! int\n\t" 9231 "BP$cmp $labl\t! Loop end" %} 9232 ins_encode %{ 9233 Label* L = $labl$$label; 9234 Assembler::Predict predict_taken = 9235 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9236 __ cmp($op1$$Register, $op2$$constant); 9237 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9238 __ delayed()->nop(); 9239 %} 9240 ins_pipe(cmp_br_reg_imm); 9241 %} 9242 9243 // Short compare and branch instructions 9244 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9245 match(If cmp (CmpI op1 op2)); 9246 predicate(UseCBCond); 9247 effect(USE labl, KILL icc); 9248 9249 size(4); // Assuming no NOP inserted. 9250 ins_cost(BRANCH_COST); 9251 format %{ "CWB$cmp $op1,$op2,$labl\t! 
int" %} 9252 ins_encode %{ 9253 Label* L = $labl$$label; 9254 assert(__ use_cbcond(*L), "back to back cbcond"); 9255 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9256 %} 9257 ins_short_branch(1); 9258 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9259 ins_pipe(cbcond_reg_reg); 9260 %} 9261 9262 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9263 match(If cmp (CmpI op1 op2)); 9264 predicate(UseCBCond); 9265 effect(USE labl, KILL icc); 9266 9267 size(4); // Assuming no NOP inserted. 9268 ins_cost(BRANCH_COST); 9269 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %} 9270 ins_encode %{ 9271 Label* L = $labl$$label; 9272 assert(__ use_cbcond(*L), "back to back cbcond"); 9273 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9274 %} 9275 ins_short_branch(1); 9276 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9277 ins_pipe(cbcond_reg_imm); 9278 %} 9279 9280 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9281 match(If cmp (CmpU op1 op2)); 9282 predicate(UseCBCond); 9283 effect(USE labl, KILL icc); 9284 9285 size(4); // Assuming no NOP inserted. 9286 ins_cost(BRANCH_COST); 9287 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %} 9288 ins_encode %{ 9289 Label* L = $labl$$label; 9290 assert(__ use_cbcond(*L), "back to back cbcond"); 9291 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9292 %} 9293 ins_short_branch(1); 9294 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9295 ins_pipe(cbcond_reg_reg); 9296 %} 9297 9298 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9299 match(If cmp (CmpU op1 op2)); 9300 predicate(UseCBCond); 9301 effect(USE labl, KILL icc); 9302 9303 size(4); // Assuming no NOP inserted. 9304 ins_cost(BRANCH_COST); 9305 format %{ "CWB$cmp $op1,$op2,$labl\t! 
unsigned" %} 9306 ins_encode %{ 9307 Label* L = $labl$$label; 9308 assert(__ use_cbcond(*L), "back to back cbcond"); 9309 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9310 %} 9311 ins_short_branch(1); 9312 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9313 ins_pipe(cbcond_reg_imm); 9314 %} 9315 9316 instruct cmpUL_reg_branch_short(cmpOpU cmp, iRegL op1, iRegL op2, label labl, flagsRegUL xcc) %{ 9317 match(If cmp (CmpUL op1 op2)); 9318 predicate(UseCBCond); 9319 effect(USE labl, KILL xcc); 9320 9321 size(4); 9322 ins_cost(BRANCH_COST); 9323 format %{ "CXB$cmp $op1,$op2,$labl\t! unsigned long" %} 9324 ins_encode %{ 9325 Label* L = $labl$$label; 9326 assert(__ use_cbcond(*L), "back to back cbcond"); 9327 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L); 9328 %} 9329 ins_short_branch(1); 9330 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9331 ins_pipe(cbcond_reg_reg); 9332 %} 9333 9334 instruct cmpUL_imm_branch_short(cmpOpU cmp, iRegL op1, immL5 op2, label labl, flagsRegUL xcc) %{ 9335 match(If cmp (CmpUL op1 op2)); 9336 predicate(UseCBCond); 9337 effect(USE labl, KILL xcc); 9338 9339 size(4); 9340 ins_cost(BRANCH_COST); 9341 format %{ "CXB$cmp $op1,$op2,$labl\t! unsigned long" %} 9342 ins_encode %{ 9343 Label* L = $labl$$label; 9344 assert(__ use_cbcond(*L), "back to back cbcond"); 9345 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L); 9346 %} 9347 ins_short_branch(1); 9348 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9349 ins_pipe(cbcond_reg_imm); 9350 %} 9351 9352 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9353 match(If cmp (CmpL op1 op2)); 9354 predicate(UseCBCond); 9355 effect(USE labl, KILL xcc); 9356 9357 size(4); // Assuming no NOP inserted. 9358 ins_cost(BRANCH_COST); 9359 format %{ "CXB$cmp $op1,$op2,$labl\t! 
long" %} 9360 ins_encode %{ 9361 Label* L = $labl$$label; 9362 assert(__ use_cbcond(*L), "back to back cbcond"); 9363 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L); 9364 %} 9365 ins_short_branch(1); 9366 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9367 ins_pipe(cbcond_reg_reg); 9368 %} 9369 9370 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9371 match(If cmp (CmpL op1 op2)); 9372 predicate(UseCBCond); 9373 effect(USE labl, KILL xcc); 9374 9375 size(4); // Assuming no NOP inserted. 9376 ins_cost(BRANCH_COST); 9377 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %} 9378 ins_encode %{ 9379 Label* L = $labl$$label; 9380 assert(__ use_cbcond(*L), "back to back cbcond"); 9381 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L); 9382 %} 9383 ins_short_branch(1); 9384 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9385 ins_pipe(cbcond_reg_imm); 9386 %} 9387 9388 // Compare Pointers and branch 9389 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9390 match(If cmp (CmpP op1 op2)); 9391 predicate(UseCBCond); 9392 effect(USE labl, KILL pcc); 9393 9394 size(4); // Assuming no NOP inserted. 9395 ins_cost(BRANCH_COST); 9396 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %} 9397 ins_encode %{ 9398 Label* L = $labl$$label; 9399 assert(__ use_cbcond(*L), "back to back cbcond"); 9400 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L); 9401 %} 9402 ins_short_branch(1); 9403 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9404 ins_pipe(cbcond_reg_reg); 9405 %} 9406 9407 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9408 match(If cmp (CmpP op1 null)); 9409 predicate(UseCBCond); 9410 effect(USE labl, KILL pcc); 9411 9412 size(4); // Assuming no NOP inserted. 
9413 ins_cost(BRANCH_COST); 9414 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %} 9415 ins_encode %{ 9416 Label* L = $labl$$label; 9417 assert(__ use_cbcond(*L), "back to back cbcond"); 9418 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L); 9419 %} 9420 ins_short_branch(1); 9421 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9422 ins_pipe(cbcond_reg_reg); 9423 %} 9424 9425 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9426 match(If cmp (CmpN op1 op2)); 9427 predicate(UseCBCond); 9428 effect(USE labl, KILL icc); 9429 9430 size(4); // Assuming no NOP inserted. 9431 ins_cost(BRANCH_COST); 9432 format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %} 9433 ins_encode %{ 9434 Label* L = $labl$$label; 9435 assert(__ use_cbcond(*L), "back to back cbcond"); 9436 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9437 %} 9438 ins_short_branch(1); 9439 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9440 ins_pipe(cbcond_reg_reg); 9441 %} 9442 9443 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9444 match(If cmp (CmpN op1 null)); 9445 predicate(UseCBCond); 9446 effect(USE labl, KILL icc); 9447 9448 size(4); // Assuming no NOP inserted. 9449 ins_cost(BRANCH_COST); 9450 format %{ "CWB$cmp $op1,0,$labl\t! 
compressed ptr" %} 9451 ins_encode %{ 9452 Label* L = $labl$$label; 9453 assert(__ use_cbcond(*L), "back to back cbcond"); 9454 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L); 9455 %} 9456 ins_short_branch(1); 9457 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9458 ins_pipe(cbcond_reg_reg); 9459 %} 9460 9461 // Loop back branch 9462 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9463 match(CountedLoopEnd cmp (CmpI op1 op2)); 9464 predicate(UseCBCond); 9465 effect(USE labl, KILL icc); 9466 9467 size(4); // Assuming no NOP inserted. 9468 ins_cost(BRANCH_COST); 9469 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %} 9470 ins_encode %{ 9471 Label* L = $labl$$label; 9472 assert(__ use_cbcond(*L), "back to back cbcond"); 9473 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9474 %} 9475 ins_short_branch(1); 9476 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9477 ins_pipe(cbcond_reg_reg); 9478 %} 9479 9480 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9481 match(CountedLoopEnd cmp (CmpI op1 op2)); 9482 predicate(UseCBCond); 9483 effect(USE labl, KILL icc); 9484 9485 size(4); // Assuming no NOP inserted. 9486 ins_cost(BRANCH_COST); 9487 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %} 9488 ins_encode %{ 9489 Label* L = $labl$$label; 9490 assert(__ use_cbcond(*L), "back to back cbcond"); 9491 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9492 %} 9493 ins_short_branch(1); 9494 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9495 ins_pipe(cbcond_reg_imm); 9496 %} 9497 9498 // Branch-on-register tests all 64 bits. We assume that values 9499 // in 64-bit registers always remains zero or sign extended 9500 // unless our code munges the high bits. 
Interrupts can chop 9501 // the high order bits to zero or sign at any time. 9502 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{ 9503 match(If cmp (CmpI op1 zero)); 9504 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9505 effect(USE labl); 9506 9507 size(8); 9508 ins_cost(BRANCH_COST); 9509 format %{ "BR$cmp $op1,$labl" %} 9510 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9511 ins_avoid_back_to_back(AVOID_BEFORE); 9512 ins_pipe(br_reg); 9513 %} 9514 9515 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{ 9516 match(If cmp (CmpP op1 null)); 9517 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9518 effect(USE labl); 9519 9520 size(8); 9521 ins_cost(BRANCH_COST); 9522 format %{ "BR$cmp $op1,$labl" %} 9523 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9524 ins_avoid_back_to_back(AVOID_BEFORE); 9525 ins_pipe(br_reg); 9526 %} 9527 9528 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{ 9529 match(If cmp (CmpL op1 zero)); 9530 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9531 effect(USE labl); 9532 9533 size(8); 9534 ins_cost(BRANCH_COST); 9535 format %{ "BR$cmp $op1,$labl" %} 9536 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9537 ins_avoid_back_to_back(AVOID_BEFORE); 9538 ins_pipe(br_reg); 9539 %} 9540 9541 9542 // ============================================================================ 9543 // Long Compare 9544 // 9545 // Currently we hold longs in 2 registers. Comparing such values efficiently 9546 // is tricky. The flavor of compare used depends on whether we are testing 9547 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit. 9548 // The GE test is the negated LT test. The LE test can be had by commuting 9549 // the operands (yielding a GE test) and then negating; negate again for the 9550 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the 9551 // NE test is negated from that. 
9552 9553 // Due to a shortcoming in the ADLC, it mixes up expressions like: 9554 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the 9555 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections 9556 // are collapsed internally in the ADLC's dfa-gen code. The match for 9557 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the 9558 // foo match ends up with the wrong leaf. One fix is to not match both 9559 // reg-reg and reg-zero forms of long-compare. This is unfortunate because 9560 // both forms beat the trinary form of long-compare and both are very useful 9561 // on Intel which has so few registers. 9562 9563 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{ 9564 match(If cmp xcc); 9565 effect(USE labl); 9566 9567 size(8); 9568 ins_cost(BRANCH_COST); 9569 format %{ "BP$cmp $xcc,$labl" %} 9570 ins_encode %{ 9571 Label* L = $labl$$label; 9572 Assembler::Predict predict_taken = 9573 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9574 9575 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9576 __ delayed()->nop(); 9577 %} 9578 ins_avoid_back_to_back(AVOID_BEFORE); 9579 ins_pipe(br_cc); 9580 %} 9581 9582 instruct branchConU_long(cmpOpU cmp, flagsRegUL xcc, label labl) %{ 9583 match(If cmp xcc); 9584 effect(USE labl); 9585 9586 size(8); 9587 ins_cost(BRANCH_COST); 9588 format %{ "BP$cmp $xcc,$labl" %} 9589 ins_encode %{ 9590 Label* L = $labl$$label; 9591 Assembler::Predict predict_taken = 9592 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9593 9594 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9595 __ delayed()->nop(); 9596 %} 9597 ins_avoid_back_to_back(AVOID_BEFORE); 9598 ins_pipe(br_cc); 9599 %} 9600 9601 // Manifest a CmpL3 result in an integer register. Very painful. 9602 // This is the test to avoid. 
// Produce -1/0/+1 in $dst for a three-way long compare (see comment above:
// this is the expensive form that the branch rules try to avoid).
instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
  match(Set dst (CmpL3 src1 src2) );
  effect( KILL ccr );
  ins_cost(6*DEFAULT_COST);
  size(24);
  format %{ "CMP $src1,$src2\t\t! long\n"
            "\tBLT,a,pn done\n"
            "\tMOV -1,$dst\t! delay slot\n"
            "\tBGT,a,pn done\n"
            "\tMOV 1,$dst\t! delay slot\n"
            "\tCLR $dst\n"
            "done:" %}
  ins_encode( cmpl_flag(src1,src2,dst) );
  ins_pipe(cmpL_reg);
%}

// Conditional move
// All of the cmov*L rules below select on the 64-bit condition codes (%xcc).
instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

// Conditional float/double moves use the FMOVcc forms (enc_cmovf_reg).
instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVS$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x102);
  format %{ "FMOVD$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

// ============================================================================
// Safepoint Instruction
// Reads the polling page; the relocation lets the VM trap the access to stop
// this thread for a safepoint.
instruct safePoint_poll(iRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  size(4);
  format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
  ins_encode %{
    __ relocate(relocInfo::poll_type);
    __ ld_ptr($poll$$Register, 0, G0);
  %}
  ins_pipe(loadPollP);
%}

// ============================================================================
// Call Instructions
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
  match(CallStaticJava);
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  size(8);
  ins_cost(CALL_COST);
  format %{ "CALL,static ; NOP ==> " %}
  ins_encode( Java_Static_Call( meth ), call_epilog );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Call Java Static Instruction (method handle version)
// Preserves/restores SP around the call; L7 is clobbered as the MH SP save.
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth, KILL l7_mh_SP_save);

  size(16);
  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle" %}
  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
  ins_pipe(simple_call);
%}

// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "SET (empty),R_G5\n\t"
            "CALL,dynamic ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth, l7RegP l7) %{
  match(CallRuntime);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog, adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallRuntime
instruct CallLeafDirect(method meth, l7RegP l7) %{
  match(CallLeaf);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallLeaf
instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
  match(CallLeafNoFP);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
  ins_encode(form_jmpl(jump_target));
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}


// Return Instruction
instruct Ret() %{
  match(Return);

  // The epilogue node did the ret already.
  size(0);
  format %{ "! return" %}
  ins_encode();
  ins_pipe(empty);
%}


// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "! discard R_O7\n\t"
            "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
  ins_encode(form_jmpl_set_exception_pc(jump_target));
  // opcode(Assembler::jmpl_op3, Assembler::arith_op);
  // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
  // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( o0RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in R_O0; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}


// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "Jmp rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}


// Die now
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ILLTRAP ; ShouldNotReachHere" %}
  ins_encode( form2_illtrap() );
  ins_pipe(tail_call);
%}

// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// not zero for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-path subtype check: calls the PartialSubtypeCheck stub (see header
// comment above).  Result comes back in $index; condition codes are set.
instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
  match(Set index (PartialSubtypeCheck sub super));
  effect( KILL pcc, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(partial_subtype_check_pipe);
%}

// Variant matched when only the flags result of the check is consumed
// (the check compared against zero); the index register is clobbered.
instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
  match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
  effect( KILL idx, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(partial_subtype_check_pipe);
%}


// ============================================================================
// inlined locking and unlocking

instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastLock object box));

  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Lock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}


instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}

// The encodings are generic.
// Zero an array with a simple backwards store loop; used when block-init
// (BIS) zeroing is not selected for this node.
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
  predicate(!use_block_zeroing(n->in(2)) );
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, KILL ccr);
  ins_cost(300);
  format %{ "MOV $cnt,$temp\n"
            "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
            " BRge loop\t\t! Clearing loop\n"
            " STX G0,[$base+$temp]\t! delay slot" %}

  ins_encode %{
    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
    Register nof_bytes_arg = $cnt$$Register;
    Register nof_bytes_tmp = $temp$$Register;
    Register base_pointer_arg = $base$$Register;

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
    // %%%% this mini-loop must not cross a cache boundary!
  %}
  ins_pipe(long_memory_op);
%}

// Zero an array using block-initializing stores (BIS).
instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to = $base$$Register;
    Register count = $cnt$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing (temp is not used).
    __ bis_zeroing(to, count, G0, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// BIS zeroing variant that needs a temp register because
// BlockZeroingLowLimit does not fit in a simm13 immediate.
instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to = $base$$Register;
    Register count = $cnt$$Register;
    Register temp = $tmp$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// String.compareTo intrinsic, Latin1 vs Latin1 encoding.
instruct string_compareL(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                         o7RegI tmp, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp$$Register, $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(long_memory_op);
%}

// String.compareTo intrinsic, UTF16 vs UTF16 encoding.
instruct string_compareU(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                         o7RegI tmp, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp$$Register, $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(long_memory_op);
%}

// String.compareTo intrinsic, Latin1 vs UTF16 encoding.
instruct string_compareLU(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                          o7RegI tmp1, g1RegI tmp2, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp1, KILL tmp2);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(long_memory_op);
%}

// String.compareTo intrinsic, UTF16 vs Latin1 encoding.
// Note: the str/cnt operand pairs are passed to string_compare swapped.
instruct string_compareUL(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                          o7RegI tmp1, g1RegI tmp2, flagsReg ccr) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp1, KILL tmp2);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ string_compare($str2$$Register, $str1$$Register,
                      $cnt2$$Register, $cnt1$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(long_memory_op);
%}

// String.equals intrinsic, Latin1 encoding.
instruct string_equalsL(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals byte[] $str1,$str2,$cnt -> $result // KILL $tmp" %}
  ins_encode %{
    __ array_equals(false, $str1$$Register, $str2$$Register,
                    $cnt$$Register, $tmp$$Register,
                    $result$$Register, true /* byte */);
  %}
  ins_pipe(long_memory_op);
%}

// String.equals intrinsic, UTF16 encoding.
instruct string_equalsU(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals char[] $str1,$str2,$cnt -> $result // KILL $tmp" %}
  ins_encode %{
    __ array_equals(false, $str1$$Register, $str2$$Register,
                    $cnt$$Register, $tmp$$Register,
                    $result$$Register, false /* byte */);
  %}
  ins_pipe(long_memory_op);
%}

// Arrays.equals intrinsic for byte arrays.
instruct array_equalsB(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                       o7RegI tmp2, flagsReg ccr) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ array_equals(true, $ary1$$Register, $ary2$$Register,
                    $tmp1$$Register, $tmp2$$Register,
                    $result$$Register, true /* byte */);
  %}
  ins_pipe(long_memory_op);
%}

// Arrays.equals intrinsic for char arrays.
instruct array_equalsC(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                       o7RegI tmp2, flagsReg ccr) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ array_equals(true, $ary1$$Register, $ary2$$Register,
                    $tmp1$$Register, $tmp2$$Register,
                    $result$$Register, false /* byte */);
  %}
  ins_pipe(long_memory_op);
%}

// StringCoding.hasNegatives intrinsic: test a byte[] for any negative byte.
instruct has_negatives(o0RegP pAryR, g3RegI iSizeR, notemp_iRegI resultR,
                       iRegL tmp1L, iRegL tmp2L, iRegL tmp3L, iRegL tmp4L,
                       flagsReg ccr)
%{
  match(Set resultR (HasNegatives pAryR iSizeR));
  effect(TEMP resultR, TEMP tmp1L, TEMP tmp2L, TEMP tmp3L, TEMP tmp4L, USE pAryR, USE iSizeR, KILL ccr);
  format %{ "has negatives byte[] $pAryR,$iSizeR -> $resultR // KILL $tmp1L,$tmp2L,$tmp3L,$tmp4L" %}
  ins_encode %{
    __ has_negatives($pAryR$$Register, $iSizeR$$Register,
                     $resultR$$Register,
                     $tmp1L$$Register, $tmp2L$$Register,
                     $tmp3L$$Register, $tmp4L$$Register);
  %}
  ins_pipe(long_memory_op);
%}

// char[] to byte[] compression
instruct string_compress(o0RegP src, o1RegP dst, g3RegI len, notemp_iRegI result, iRegL tmp, flagsReg ccr) %{
  predicate(UseVIS < 3);
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP result, TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Compress $src,$dst,$len -> $result // KILL $tmp" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);
    __ cmp_zero_and_br(Assembler::zero, $len$$Register, Ldone, false, Assembler::pn);
    __ delayed()->mov($len$$Register, $result$$Register); // copy count
    __ string_compress($src$$Register, $dst$$Register, $len$$Register, $result$$Register, $tmp$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}

// fast char[] to byte[] compression using VIS instructions
instruct string_compress_fast(o0RegP src, o1RegP dst, g3RegI len, notemp_iRegI result,
                              iRegL tmp1, iRegL tmp2, iRegL tmp3, iRegL tmp4,
                              regD ftmp1, regD ftmp2, regD ftmp3, flagsReg ccr) %{
  predicate(UseVIS >= 3);
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP ftmp1, TEMP ftmp2, TEMP ftmp3, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Compress Fast $src,$dst,$len -> $result // KILL $tmp1,$tmp2,$tmp3,$tmp4,$ftmp1,$ftmp2,$ftmp3" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);
    // 16-char-at-a-time VIS path; falls through to the scalar loop
    // for any remaining tail characters.
    __ string_compress_16($src$$Register, $dst$$Register, $len$$Register, $result$$Register,
                          $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register,
                          $ftmp1$$FloatRegister, $ftmp2$$FloatRegister, $ftmp3$$FloatRegister, Ldone);
    __ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
    __ string_compress($src$$Register, $dst$$Register, $len$$Register, $result$$Register, $tmp1$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}

// byte[] to char[] inflation
instruct string_inflate(Universe dummy, o0RegP src, o1RegP dst, g3RegI len,
                        iRegL tmp, flagsReg ccr) %{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Inflate $src,$dst,$len // KILL $tmp" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);
    __ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
    __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}

// fast byte[] to char[] inflation using VIS instructions
instruct string_inflate_fast(Universe dummy, o0RegP src, o1RegP dst, g3RegI len,
                             iRegL tmp, regD ftmp1, regD ftmp2, regD ftmp3, regD ftmp4, flagsReg ccr) %{
  predicate(UseVIS >= 3);
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp, TEMP ftmp1, TEMP ftmp2, TEMP ftmp3, TEMP ftmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL ccr);
  ins_cost(300);
  format %{ "String Inflate Fast $src,$dst,$len // KILL $tmp,$ftmp1,$ftmp2,$ftmp3,$ftmp4" %}
  ins_encode %{
    Label Ldone;
    __ signx($len$$Register);
    // 16-char-at-a-time VIS path followed by a scalar tail loop.
    __ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register,
                         $ftmp1$$FloatRegister, $ftmp2$$FloatRegister, $ftmp3$$FloatRegister, $ftmp4$$FloatRegister, Ldone);
    __ cmp_and_brx_short($len$$Register, 0, Assembler::equal, Assembler::pn, Ldone);
    __ string_inflate($src$$Register, $dst$$Register, $len$$Register, $tmp$$Register, Ldone);
    __ bind(Ldone);
  %}
  ins_pipe(long_memory_op);
%}


//---------- Zeros Count Instructions ------------------------------------------

// Count leading zeros via smear-right then POPC (no dedicated CLZ on SPARC).
instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosI src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // return (WORDBITS - popc(x));
  format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
            "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 32,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srl(Rsrc, 1, Rtmp);
    __ srl(Rsrc, 0, Rdst);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 2, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 4, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 8, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 16, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerInt, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// 64-bit count-leading-zeros: same smear-then-POPC trick over 64 bits.
instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // x |= (x >> 32);
  // return (WORDBITS - popc(x));
  format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
            "OR $src,$tmp,$dst\n\t"
            "SRLX $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,32,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 64,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srlx(Rsrc, 1, Rtmp);
    __ or3( Rsrc, Rtmp, Rdst);
    __ srlx(Rdst, 2, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 4, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 8, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 16, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 32, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerLong, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// Count trailing zeros: popc(~x & (x - 1)); SRL by G0 zero-extends to 32 bits.
instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "SRL $dst,R_G0,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ srl(Rdst, G0, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// 64-bit count trailing zeros: popc(~x & (x - 1)), no zero-extension needed.
instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}


//---------- Population Count Instructions -------------------------------------

instruct popCountI(iRegIsafe dst, iRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
            "POPC $dst, $dst" %}
  ins_encode %{
    __ srl($src$$Register, G0, $dst$$Register);
    __ popc($dst$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegIsafe dst, iRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "POPC $src, $dst" %}
  ins_encode %{
    __ popc($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}


// ============================================================================
//------------Bytes reverse--------------------------------------------------
// Byte-swap rules: the value is spilled to a stack slot and reloaded with a
// little-endian ASI load, letting the hardware do the byte reversal.

instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
  match(Set dst (ReverseBytesL src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesUS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

// Load Integer reversed byte order
instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesI (LoadI src)));

  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(4);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned and reversed
instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesL (LoadL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesUS (LoadUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load short reversed byte order
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesS (LoadS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Store Integer reversed byte order
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreI dst (ReverseBytesI src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store Long reversed byte order
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
  match(Set dst (StoreL dst (ReverseBytesL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store unsigned short/char reversed byte order
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store short reversed byte order
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// ====================VECTOR INSTRUCTIONS=====================================

// Load Aligned Packed values into a Double Register
instruct loadV8(regD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
  ins_encode %{
    __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(floadD_mem);
%}

// Store Vector in Double register to memory
instruct storeV8(memory mem, regD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$mem\t!
store vector (8 bytes)" %} 10597 ins_encode %{ 10598 __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address); 10599 %} 10600 ins_pipe(fstoreD_mem_reg); 10601 %} 10602 10603 // Store Zero into vector in memory 10604 instruct storeV8B_zero(memory mem, immI0 zero) %{ 10605 predicate(n->as_StoreVector()->memory_size() == 8); 10606 match(Set mem (StoreVector mem (ReplicateB zero))); 10607 ins_cost(MEMORY_REF_COST); 10608 size(4); 10609 format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %} 10610 ins_encode %{ 10611 __ stx(G0, $mem$$Address); 10612 %} 10613 ins_pipe(fstoreD_mem_zero); 10614 %} 10615 10616 instruct storeV4S_zero(memory mem, immI0 zero) %{ 10617 predicate(n->as_StoreVector()->memory_size() == 8); 10618 match(Set mem (StoreVector mem (ReplicateS zero))); 10619 ins_cost(MEMORY_REF_COST); 10620 size(4); 10621 format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %} 10622 ins_encode %{ 10623 __ stx(G0, $mem$$Address); 10624 %} 10625 ins_pipe(fstoreD_mem_zero); 10626 %} 10627 10628 instruct storeV2I_zero(memory mem, immI0 zero) %{ 10629 predicate(n->as_StoreVector()->memory_size() == 8); 10630 match(Set mem (StoreVector mem (ReplicateI zero))); 10631 ins_cost(MEMORY_REF_COST); 10632 size(4); 10633 format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %} 10634 ins_encode %{ 10635 __ stx(G0, $mem$$Address); 10636 %} 10637 ins_pipe(fstoreD_mem_zero); 10638 %} 10639 10640 instruct storeV2F_zero(memory mem, immF0 zero) %{ 10641 predicate(n->as_StoreVector()->memory_size() == 8); 10642 match(Set mem (StoreVector mem (ReplicateF zero))); 10643 ins_cost(MEMORY_REF_COST); 10644 size(4); 10645 format %{ "STX $zero,$mem\t! 
store zero vector (2 floats)" %} 10646 ins_encode %{ 10647 __ stx(G0, $mem$$Address); 10648 %} 10649 ins_pipe(fstoreD_mem_zero); 10650 %} 10651 10652 // Replicate scalar to packed byte values into Double register 10653 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10654 predicate(n->as_Vector()->length() == 8 && UseVIS >= 3); 10655 match(Set dst (ReplicateB src)); 10656 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10657 format %{ "SLLX $src,56,$tmp\n\t" 10658 "SRLX $tmp, 8,$tmp2\n\t" 10659 "OR $tmp,$tmp2,$tmp\n\t" 10660 "SRLX $tmp,16,$tmp2\n\t" 10661 "OR $tmp,$tmp2,$tmp\n\t" 10662 "SRLX $tmp,32,$tmp2\n\t" 10663 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10664 "MOVXTOD $tmp,$dst\t! MoveL2D" %} 10665 ins_encode %{ 10666 Register Rsrc = $src$$Register; 10667 Register Rtmp = $tmp$$Register; 10668 Register Rtmp2 = $tmp2$$Register; 10669 __ sllx(Rsrc, 56, Rtmp); 10670 __ srlx(Rtmp, 8, Rtmp2); 10671 __ or3 (Rtmp, Rtmp2, Rtmp); 10672 __ srlx(Rtmp, 16, Rtmp2); 10673 __ or3 (Rtmp, Rtmp2, Rtmp); 10674 __ srlx(Rtmp, 32, Rtmp2); 10675 __ or3 (Rtmp, Rtmp2, Rtmp); 10676 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10677 %} 10678 ins_pipe(ialu_reg); 10679 %} 10680 10681 // Replicate scalar to packed byte values into Double stack 10682 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10683 predicate(n->as_Vector()->length() == 8 && UseVIS < 3); 10684 match(Set dst (ReplicateB src)); 10685 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10686 format %{ "SLLX $src,56,$tmp\n\t" 10687 "SRLX $tmp, 8,$tmp2\n\t" 10688 "OR $tmp,$tmp2,$tmp\n\t" 10689 "SRLX $tmp,16,$tmp2\n\t" 10690 "OR $tmp,$tmp2,$tmp\n\t" 10691 "SRLX $tmp,32,$tmp2\n\t" 10692 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10693 "STX $tmp,$dst\t! 
regL to stkD" %} 10694 ins_encode %{ 10695 Register Rsrc = $src$$Register; 10696 Register Rtmp = $tmp$$Register; 10697 Register Rtmp2 = $tmp2$$Register; 10698 __ sllx(Rsrc, 56, Rtmp); 10699 __ srlx(Rtmp, 8, Rtmp2); 10700 __ or3 (Rtmp, Rtmp2, Rtmp); 10701 __ srlx(Rtmp, 16, Rtmp2); 10702 __ or3 (Rtmp, Rtmp2, Rtmp); 10703 __ srlx(Rtmp, 32, Rtmp2); 10704 __ or3 (Rtmp, Rtmp2, Rtmp); 10705 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10706 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10707 %} 10708 ins_pipe(ialu_reg); 10709 %} 10710 10711 // Replicate scalar constant to packed byte values in Double register 10712 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{ 10713 predicate(n->as_Vector()->length() == 8); 10714 match(Set dst (ReplicateB con)); 10715 effect(KILL tmp); 10716 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %} 10717 ins_encode %{ 10718 // XXX This is a quick fix for 6833573. 10719 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister); 10720 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register); 10721 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10722 %} 10723 ins_pipe(loadConFD); 10724 %} 10725 10726 // Replicate scalar to packed char/short values into Double register 10727 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10728 predicate(n->as_Vector()->length() == 4 && UseVIS >= 3); 10729 match(Set dst (ReplicateS src)); 10730 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10731 format %{ "SLLX $src,48,$tmp\n\t" 10732 "SRLX $tmp,16,$tmp2\n\t" 10733 "OR $tmp,$tmp2,$tmp\n\t" 10734 "SRLX $tmp,32,$tmp2\n\t" 10735 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10736 "MOVXTOD $tmp,$dst\t! 
MoveL2D" %} 10737 ins_encode %{ 10738 Register Rsrc = $src$$Register; 10739 Register Rtmp = $tmp$$Register; 10740 Register Rtmp2 = $tmp2$$Register; 10741 __ sllx(Rsrc, 48, Rtmp); 10742 __ srlx(Rtmp, 16, Rtmp2); 10743 __ or3 (Rtmp, Rtmp2, Rtmp); 10744 __ srlx(Rtmp, 32, Rtmp2); 10745 __ or3 (Rtmp, Rtmp2, Rtmp); 10746 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10747 %} 10748 ins_pipe(ialu_reg); 10749 %} 10750 10751 // Replicate scalar to packed char/short values into Double stack 10752 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10753 predicate(n->as_Vector()->length() == 4 && UseVIS < 3); 10754 match(Set dst (ReplicateS src)); 10755 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10756 format %{ "SLLX $src,48,$tmp\n\t" 10757 "SRLX $tmp,16,$tmp2\n\t" 10758 "OR $tmp,$tmp2,$tmp\n\t" 10759 "SRLX $tmp,32,$tmp2\n\t" 10760 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10761 "STX $tmp,$dst\t! regL to stkD" %} 10762 ins_encode %{ 10763 Register Rsrc = $src$$Register; 10764 Register Rtmp = $tmp$$Register; 10765 Register Rtmp2 = $tmp2$$Register; 10766 __ sllx(Rsrc, 48, Rtmp); 10767 __ srlx(Rtmp, 16, Rtmp2); 10768 __ or3 (Rtmp, Rtmp2, Rtmp); 10769 __ srlx(Rtmp, 32, Rtmp2); 10770 __ or3 (Rtmp, Rtmp2, Rtmp); 10771 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10772 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10773 %} 10774 ins_pipe(ialu_reg); 10775 %} 10776 10777 // Replicate scalar constant to packed char/short values in Double register 10778 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{ 10779 predicate(n->as_Vector()->length() == 4); 10780 match(Set dst (ReplicateS con)); 10781 effect(KILL tmp); 10782 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %} 10783 ins_encode %{ 10784 // XXX This is a quick fix for 6833573. 
10785 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister); 10786 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register); 10787 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10788 %} 10789 ins_pipe(loadConFD); 10790 %} 10791 10792 // Replicate scalar to packed int values into Double register 10793 instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10794 predicate(n->as_Vector()->length() == 2 && UseVIS >= 3); 10795 match(Set dst (ReplicateI src)); 10796 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10797 format %{ "SLLX $src,32,$tmp\n\t" 10798 "SRLX $tmp,32,$tmp2\n\t" 10799 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t" 10800 "MOVXTOD $tmp,$dst\t! MoveL2D" %} 10801 ins_encode %{ 10802 Register Rsrc = $src$$Register; 10803 Register Rtmp = $tmp$$Register; 10804 Register Rtmp2 = $tmp2$$Register; 10805 __ sllx(Rsrc, 32, Rtmp); 10806 __ srlx(Rtmp, 32, Rtmp2); 10807 __ or3 (Rtmp, Rtmp2, Rtmp); 10808 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10809 %} 10810 ins_pipe(ialu_reg); 10811 %} 10812 10813 // Replicate scalar to packed int values into Double stack 10814 instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10815 predicate(n->as_Vector()->length() == 2 && UseVIS < 3); 10816 match(Set dst (ReplicateI src)); 10817 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10818 format %{ "SLLX $src,32,$tmp\n\t" 10819 "SRLX $tmp,32,$tmp2\n\t" 10820 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t" 10821 "STX $tmp,$dst\t! 
regL to stkD" %} 10822 ins_encode %{ 10823 Register Rsrc = $src$$Register; 10824 Register Rtmp = $tmp$$Register; 10825 Register Rtmp2 = $tmp2$$Register; 10826 __ sllx(Rsrc, 32, Rtmp); 10827 __ srlx(Rtmp, 32, Rtmp2); 10828 __ or3 (Rtmp, Rtmp2, Rtmp); 10829 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10830 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10831 %} 10832 ins_pipe(ialu_reg); 10833 %} 10834 10835 // Replicate scalar zero constant to packed int values in Double register 10836 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{ 10837 predicate(n->as_Vector()->length() == 2); 10838 match(Set dst (ReplicateI con)); 10839 effect(KILL tmp); 10840 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %} 10841 ins_encode %{ 10842 // XXX This is a quick fix for 6833573. 10843 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister); 10844 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register); 10845 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10846 %} 10847 ins_pipe(loadConFD); 10848 %} 10849 10850 // Replicate scalar to packed float values into Double stack 10851 instruct Repl2F_stk(stackSlotD dst, regF src) %{ 10852 predicate(n->as_Vector()->length() == 2); 10853 match(Set dst (ReplicateF src)); 10854 ins_cost(MEMORY_REF_COST*2); 10855 format %{ "STF $src,$dst.hi\t! 
packed2F\n\t" 10856 "STF $src,$dst.lo" %} 10857 opcode(Assembler::stf_op3); 10858 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src)); 10859 ins_pipe(fstoreF_stk_reg); 10860 %} 10861 10862 // Replicate scalar zero constant to packed float values in Double register 10863 instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{ 10864 predicate(n->as_Vector()->length() == 2); 10865 match(Set dst (ReplicateF con)); 10866 effect(KILL tmp); 10867 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %} 10868 ins_encode %{ 10869 // XXX This is a quick fix for 6833573. 10870 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister); 10871 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register); 10872 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10873 %} 10874 ins_pipe(loadConFD); 10875 %} 10876 10877 //----------PEEPHOLE RULES----------------------------------------------------- 10878 // These must follow all instruction definitions as they use the names 10879 // defined in the instructions definitions. 10880 // 10881 // peepmatch ( root_instr_name [preceding_instruction]* ); 10882 // 10883 // peepconstraint %{ 10884 // (instruction_number.operand_name relational_op instruction_number.operand_name 10885 // [, ...] 
); 10886 // // instruction numbers are zero-based using left to right order in peepmatch 10887 // 10888 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) ); 10889 // // provide an instruction_number.operand_name for each operand that appears 10890 // // in the replacement instruction's match rule 10891 // 10892 // ---------VM FLAGS--------------------------------------------------------- 10893 // 10894 // All peephole optimizations can be turned off using -XX:-OptoPeephole 10895 // 10896 // Each peephole rule is given an identifying number starting with zero and 10897 // increasing by one in the order seen by the parser. An individual peephole 10898 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# 10899 // on the command-line. 10900 // 10901 // ---------CURRENT LIMITATIONS---------------------------------------------- 10902 // 10903 // Only match adjacent instructions in same basic block 10904 // Only equality constraints 10905 // Only constraints between operands, not (0.dest_reg == EAX_enc) 10906 // Only one replacement instruction 10907 // 10908 // ---------EXAMPLE---------------------------------------------------------- 10909 // 10910 // // pertinent parts of existing instructions in architecture description 10911 // instruct movI(eRegI dst, eRegI src) %{ 10912 // match(Set dst (CopyI src)); 10913 // %} 10914 // 10915 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{ 10916 // match(Set dst (AddI dst src)); 10917 // effect(KILL cr); 10918 // %} 10919 // 10920 // // Change (inc mov) to lea 10921 // peephole %{ 10922 // // increment preceeded by register-register move 10923 // peepmatch ( incI_eReg movI ); 10924 // // require that the destination register of the increment 10925 // // match the destination register of the move 10926 // peepconstraint ( 0.dst == 1.dst ); 10927 // // construct a replacement instruction that sets 10928 // // the destination to ( move's source register + one ) 10929 // peepreplace ( 
incI_eReg_immI1( 0.dst 1.src 0.src ) ); 10930 // %} 10931 // 10932 10933 // // Change load of spilled value to only a spill 10934 // instruct storeI(memory mem, eRegI src) %{ 10935 // match(Set mem (StoreI mem src)); 10936 // %} 10937 // 10938 // instruct loadI(eRegI dst, memory mem) %{ 10939 // match(Set dst (LoadI mem)); 10940 // %} 10941 // 10942 // peephole %{ 10943 // peepmatch ( loadI storeI ); 10944 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); 10945 // peepreplace ( storeI( 1.mem 1.mem 1.src ) ); 10946 // %} 10947 10948 //----------SMARTSPILL RULES--------------------------------------------------- 10949 // These must follow all instruction definitions as they use the names 10950 // defined in the instructions definitions. 10951 // 10952 // SPARC will probably not have any of these rules due to RISC instruction set. 10953 10954 //----------PIPELINE----------------------------------------------------------- 10955 // Rules which define the behavior of the target architectures pipeline.