1 // 2 // Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 // 5 // This code is free software; you can redistribute it and/or modify it 6 // under the terms of the GNU General Public License version 2 only, as 7 // published by the Free Software Foundation. 8 // 9 // This code is distributed in the hope that it will be useful, but WITHOUT 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 // version 2 for more details (a copy is included in the LICENSE file that 13 // accompanied this code). 14 // 15 // You should have received a copy of the GNU General Public License version 16 // 2 along with this work; if not, write to the Free Software Foundation, 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 // 19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 // or visit www.oracle.com if you need additional information or have any 21 // questions. 22 // 23 // 24 25 // SPARC Architecture Description File 26 27 //----------REGISTER DEFINITION BLOCK------------------------------------------ 28 // This information is used by the matcher and the register allocator to 29 // describe individual registers and classes of registers within the target 30 // archtecture. 31 register %{ 32 //----------Architecture Description Register Definitions---------------------- 33 // General Registers 34 // "reg_def" name ( register save type, C convention save type, 35 // ideal register type, encoding, vm name ); 36 // Register Save Types: 37 // 38 // NS = No-Save: The register allocator assumes that these registers 39 // can be used without saving upon entry to the method, & 40 // that they do not need to be saved at call sites. 
41 // 42 // SOC = Save-On-Call: The register allocator assumes that these registers 43 // can be used without saving upon entry to the method, 44 // but that they must be saved at call sites. 45 // 46 // SOE = Save-On-Entry: The register allocator assumes that these registers 47 // must be saved before using them upon entry to the 48 // method, but they do not need to be saved at call 49 // sites. 50 // 51 // AS = Always-Save: The register allocator assumes that these registers 52 // must be saved before using them upon entry to the 53 // method, & that they must be saved at call sites. 54 // 55 // Ideal Register Type is used to determine how to save & restore a 56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get 57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. 58 // 59 // The encoding number is the actual bit-pattern placed into the opcodes. 60 61 62 // ---------------------------- 63 // Integer/Long Registers 64 // ---------------------------- 65 66 // Need to expose the hi/lo aspect of 64-bit registers 67 // This register set is used for both the 64-bit build and 68 // the 32-bit build with 1-register longs. 
69 70 // Global Registers 0-7 71 reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next()); 72 reg_def R_G0 ( NS, NS, Op_RegI, 0, G0->as_VMReg()); 73 reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next()); 74 reg_def R_G1 (SOC, SOC, Op_RegI, 1, G1->as_VMReg()); 75 reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next()); 76 reg_def R_G2 ( NS, NS, Op_RegI, 2, G2->as_VMReg()); 77 reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next()); 78 reg_def R_G3 (SOC, SOC, Op_RegI, 3, G3->as_VMReg()); 79 reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next()); 80 reg_def R_G4 (SOC, SOC, Op_RegI, 4, G4->as_VMReg()); 81 reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next()); 82 reg_def R_G5 (SOC, SOC, Op_RegI, 5, G5->as_VMReg()); 83 reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next()); 84 reg_def R_G6 ( NS, NS, Op_RegI, 6, G6->as_VMReg()); 85 reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next()); 86 reg_def R_G7 ( NS, NS, Op_RegI, 7, G7->as_VMReg()); 87 88 // Output Registers 0-7 89 reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next()); 90 reg_def R_O0 (SOC, SOC, Op_RegI, 8, O0->as_VMReg()); 91 reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next()); 92 reg_def R_O1 (SOC, SOC, Op_RegI, 9, O1->as_VMReg()); 93 reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next()); 94 reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg()); 95 reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next()); 96 reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg()); 97 reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next()); 98 reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg()); 99 reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next()); 100 reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg()); 101 reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next()); 102 reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg()); 103 reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next()); 104 reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg()); 
105 106 // Local Registers 0-7 107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next()); 108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg()); 109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next()); 110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg()); 111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next()); 112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg()); 113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next()); 114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg()); 115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next()); 116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg()); 117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next()); 118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg()); 119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next()); 120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg()); 121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next()); 122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg()); 123 124 // Input Registers 0-7 125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next()); 126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg()); 127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next()); 128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg()); 129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next()); 130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg()); 131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next()); 132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg()); 133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next()); 134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg()); 135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next()); 136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg()); 137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next()); 138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg()); 139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next()); 140 reg_def R_I7 ( NS, NS, Op_RegI, 31, 
I7->as_VMReg()); 141 142 // ---------------------------- 143 // Float/Double Registers 144 // ---------------------------- 145 146 // Float Registers 147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()); 148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()); 149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()); 150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()); 151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()); 152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()); 153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()); 154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()); 155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()); 156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()); 157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg()); 158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg()); 159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg()); 160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg()); 161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg()); 162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg()); 163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg()); 164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg()); 165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg()); 166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg()); 167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg()); 168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg()); 169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg()); 170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg()); 171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg()); 172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg()); 173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg()); 174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg()); 175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg()); 176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg()); 177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg()); 178 
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg()); 179 180 // Double Registers 181 // The rules of ADL require that double registers be defined in pairs. 182 // Each pair must be two 32-bit values, but not necessarily a pair of 183 // single float registers. In each pair, ADLC-assigned register numbers 184 // must be adjacent, with the lower number even. Finally, when the 185 // CPU stores such a register pair to memory, the word associated with 186 // the lower ADLC-assigned number must be stored to the lower address. 187 188 // These definitions specify the actual bit encodings of the sparc 189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp 190 // wants 0-63, so we have to convert every time we want to use fp regs 191 // with the macroassembler, using reg_to_DoubleFloatRegister_object(). 192 // 255 is a flag meaning "don't go here". 193 // I believe we can't handle callee-save doubles D32 and up until 194 // the place in the sparc stack crawler that asserts on the 255 is 195 // fixed up. 
196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg()); 197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next()); 198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg()); 199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next()); 200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg()); 201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next()); 202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg()); 203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next()); 204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg()); 205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next()); 206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg()); 207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next()); 208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg()); 209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next()); 210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg()); 211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next()); 212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg()); 213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next()); 214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg()); 215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next()); 216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg()); 217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next()); 218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg()); 219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next()); 220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg()); 221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next()); 222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg()); 223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next()); 224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg()); 225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next()); 226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg()); 227 reg_def R_D62x(SOC, 
SOC, Op_RegD,255, F62->as_VMReg()->next()); 228 229 230 // ---------------------------- 231 // Special Registers 232 // Condition Codes Flag Registers 233 // I tried to break out ICC and XCC but it's not very pretty. 234 // Every Sparc instruction which defs/kills one also kills the other. 235 // Hence every compare instruction which defs one kind of flags ends 236 // up needing a kill of the other. 237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 238 239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad()); 241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad()); 242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad()); 243 244 // ---------------------------- 245 // Specify the enum values for the registers. These enums are only used by the 246 // OptoReg "class". We can convert these enum values at will to VMReg when needed 247 // for visibility to the rest of the vm. The order of this enum influences the 248 // register allocator so having the freedom to set this order and not be stuck 249 // with the order that is natural for the rest of the vm is worth it. 250 alloc_class chunk0( 251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H, 252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H, 253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H, 254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H); 255 256 // Note that a register is not allocatable unless it is also mentioned 257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg. 258 259 alloc_class chunk1( 260 // The first registers listed here are those most likely to be used 261 // as temporaries. 
We move F0..F7 away from the front of the list, 262 // to reduce the likelihood of interferences with parameters and 263 // return values. Likewise, we avoid using F0/F1 for parameters, 264 // since they are used for return values. 265 // This FPU fine-tuning is worth about 1% on the SPEC geomean. 266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23, 268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31, 269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values 270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x, 271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x, 273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x); 274 275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3); 276 277 //----------Architecture Description Register Classes-------------------------- 278 // Several register classes are automatically defined based upon information in 279 // this architecture description. 280 // 1) reg_class inline_cache_reg ( as defined in frame section ) 281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section ) 282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) 283 // 284 285 // G0 is not included in integer class since it has special meaning. 286 reg_class g0_reg(R_G0); 287 288 // ---------------------------- 289 // Integer Register Classes 290 // ---------------------------- 291 // Exclusions from i_reg: 292 // R_G0: hardwired zero 293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java) 294 // R_G6: reserved by Solaris ABI to tools 295 // R_G7: reserved by Solaris ABI to libthread 296 // R_O7: Used as a temp in many encodings 297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 298 299 // Class for all integer registers, except the G registers. 
This is used for 300 // encodings which use G registers as temps. The regular inputs to such 301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator 302 // will not put an input into a temp register. 303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 304 305 reg_class g1_regI(R_G1); 306 reg_class g3_regI(R_G3); 307 reg_class g4_regI(R_G4); 308 reg_class o0_regI(R_O0); 309 reg_class o7_regI(R_O7); 310 311 // ---------------------------- 312 // Pointer Register Classes 313 // ---------------------------- 314 #ifdef _LP64 315 // 64-bit build means 64-bit pointers means hi/lo pairs 316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 320 // Lock encodings use G3 and G4 internally 321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5, 322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 325 // Special class for storeP instructions, which can store SP or RPC to TLS. 326 // It is also used for memory addressing, allowing direct TLS addressing. 
327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP, 329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP ); 331 // R_L7 is the lowest-priority callee-save (i.e., NS) register 332 // We use it to save R_G2 across calls out of Java. 333 reg_class l7_regP(R_L7H,R_L7); 334 335 // Other special pointer regs 336 reg_class g1_regP(R_G1H,R_G1); 337 reg_class g2_regP(R_G2H,R_G2); 338 reg_class g3_regP(R_G3H,R_G3); 339 reg_class g4_regP(R_G4H,R_G4); 340 reg_class g5_regP(R_G5H,R_G5); 341 reg_class i0_regP(R_I0H,R_I0); 342 reg_class o0_regP(R_O0H,R_O0); 343 reg_class o1_regP(R_O1H,R_O1); 344 reg_class o2_regP(R_O2H,R_O2); 345 reg_class o7_regP(R_O7H,R_O7); 346 347 #else // _LP64 348 // 32-bit build means 32-bit pointers means 1 register. 349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5, 350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 353 // Lock encodings use G3 and G4 internally 354 reg_class lock_ptr_reg(R_G1, R_G5, 355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 358 // Special class for storeP instructions, which can store SP or RPC to TLS. 359 // It is also used for memory addressing, allowing direct TLS addressing. 360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5, 361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP, 362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP); 364 // R_L7 is the lowest-priority callee-save (i.e., NS) register 365 // We use it to save R_G2 across calls out of Java. 
366 reg_class l7_regP(R_L7); 367 368 // Other special pointer regs 369 reg_class g1_regP(R_G1); 370 reg_class g2_regP(R_G2); 371 reg_class g3_regP(R_G3); 372 reg_class g4_regP(R_G4); 373 reg_class g5_regP(R_G5); 374 reg_class i0_regP(R_I0); 375 reg_class o0_regP(R_O0); 376 reg_class o1_regP(R_O1); 377 reg_class o2_regP(R_O2); 378 reg_class o7_regP(R_O7); 379 #endif // _LP64 380 381 382 // ---------------------------- 383 // Long Register Classes 384 // ---------------------------- 385 // Longs in 1 register. Aligned adjacent hi/lo pairs. 386 // Note: O7 is never in this class; it is sometimes used as an encoding temp. 387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5 388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5 389 #ifdef _LP64 390 // 64-bit, longs in 1 register: use all 64-bit integer registers 391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's. 392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7 393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 394 #endif // _LP64 395 ); 396 397 reg_class g1_regL(R_G1H,R_G1); 398 reg_class g3_regL(R_G3H,R_G3); 399 reg_class o2_regL(R_O2H,R_O2); 400 reg_class o7_regL(R_O7H,R_O7); 401 402 // ---------------------------- 403 // Special Class for Condition Code Flags Register 404 reg_class int_flags(CCR); 405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3); 406 reg_class float_flag0(FCC0); 407 408 409 // ---------------------------- 410 // Float Point Register Classes 411 // ---------------------------- 412 // Skip F30/F31, they are reserved for mem-mem copies 413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 414 415 // Paired floating point registers--they show up in the same order as the floats, 416 // but they are used with the "Op_RegD" 
type, and always occur in even/odd pairs. 417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29, 419 /* Use extra V9 double registers; this AD file does not support V8 */ 420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x 422 ); 423 424 // Paired floating point registers--they show up in the same order as the floats, 425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs. 426 // This class is usable for mis-aligned loads as happen in I2C adapters. 427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 429 %} 430 431 //----------DEFINITION BLOCK--------------------------------------------------- 432 // Define name --> value mappings to inform the ADLC of an integer valued name 433 // Current support includes integer values in the range [0, 0x7FFFFFFF] 434 // Format: 435 // int_def <name> ( <int_value>, <expression>); 436 // Generated Code in ad_<arch>.hpp 437 // #define <name> (<expression>) 438 // // value == <int_value> 439 // Generated code in ad_<arch>.cpp adlc_verification() 440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>"); 441 // 442 definitions %{ 443 // The default cost (of an ALU instruction). 444 int_def DEFAULT_COST ( 100, 100); 445 int_def HUGE_COST (1000000, 1000000); 446 447 // Memory refs are twice as expensive as run-of-the-mill. 448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2); 449 450 // Branches are even more expensive. 
451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3); 452 int_def CALL_COST ( 300, DEFAULT_COST * 3); 453 %} 454 455 456 //----------SOURCE BLOCK------------------------------------------------------- 457 // This is a block of C++ code which provides values, functions, and 458 // definitions necessary in the rest of the architecture description 459 source_hpp %{ 460 // Must be visible to the DFA in dfa_sparc.cpp 461 extern bool can_branch_register( Node *bol, Node *cmp ); 462 463 extern bool use_block_zeroing(Node* count); 464 465 // Macros to extract hi & lo halves from a long pair. 466 // G0 is not part of any long pair, so assert on that. 467 // Prevents accidentally using G1 instead of G0. 468 #define LONG_HI_REG(x) (x) 469 #define LONG_LO_REG(x) (x) 470 471 %} 472 473 source %{ 474 #define __ _masm. 475 476 // tertiary op of a LoadP or StoreP encoding 477 #define REGP_OP true 478 479 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding); 480 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding); 481 static Register reg_to_register_object(int register_encoding); 482 483 // Used by the DFA in dfa_sparc.cpp. 484 // Check for being able to use a V9 branch-on-register. Requires a 485 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign- 486 // extended. Doesn't work following an integer ADD, for example, because of 487 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On 488 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and 489 // replace them with zero, which could become sign-extension in a different OS 490 // release. There's no obvious reason why an interrupt will ever fill these 491 // bits with non-zero junk (the registers are reloaded with standard LD 492 // instructions which either zero-fill or sign-fill). 
493 bool can_branch_register( Node *bol, Node *cmp ) { 494 if( !BranchOnRegister ) return false; 495 #ifdef _LP64 496 if( cmp->Opcode() == Op_CmpP ) 497 return true; // No problems with pointer compares 498 #endif 499 if( cmp->Opcode() == Op_CmpL ) 500 return true; // No problems with long compares 501 502 if( !SparcV9RegsHiBitsZero ) return false; 503 if( bol->as_Bool()->_test._test != BoolTest::ne && 504 bol->as_Bool()->_test._test != BoolTest::eq ) 505 return false; 506 507 // Check for comparing against a 'safe' value. Any operation which 508 // clears out the high word is safe. Thus, loads and certain shifts 509 // are safe, as are non-negative constants. Any operation which 510 // preserves zero bits in the high word is safe as long as each of its 511 // inputs are safe. Thus, phis and bitwise booleans are safe if their 512 // inputs are safe. At present, the only important case to recognize 513 // seems to be loads. Constants should fold away, and shifts & 514 // logicals can use the 'cc' forms. 515 Node *x = cmp->in(1); 516 if( x->is_Load() ) return true; 517 if( x->is_Phi() ) { 518 for( uint i = 1; i < x->req(); i++ ) 519 if( !x->in(i)->is_Load() ) 520 return false; 521 return true; 522 } 523 return false; 524 } 525 526 bool use_block_zeroing(Node* count) { 527 // Use BIS for zeroing if count is not constant 528 // or it is >= BlockZeroingLowLimit. 529 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit); 530 } 531 532 // **************************************************************************** 533 534 // REQUIRED FUNCTIONALITY 535 536 // !!!!! Special hack to get all type of calls to specify the byte offset 537 // from the start of the call to the point where the return address 538 // will point. 539 // The "return address" is the address of the call instruction, plus 8. 
540 541 int MachCallStaticJavaNode::ret_addr_offset() { 542 int offset = NativeCall::instruction_size; // call; delay slot 543 if (_method_handle_invoke) 544 offset += 4; // restore SP 545 return offset; 546 } 547 548 int MachCallDynamicJavaNode::ret_addr_offset() { 549 int vtable_index = this->_vtable_index; 550 if (vtable_index < 0) { 551 // must be invalid_vtable_index, not nonvirtual_vtable_index 552 assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value"); 553 return (NativeMovConstReg::instruction_size + 554 NativeCall::instruction_size); // sethi; setlo; call; delay slot 555 } else { 556 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 557 int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 558 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 559 int klass_load_size; 560 if (UseCompressedKlassPointers) { 561 assert(Universe::heap() != NULL, "java heap should be initialized"); 562 if (Universe::narrow_klass_base() == NULL) 563 klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass() 564 else 565 klass_load_size = 3*BytesPerInstWord; 566 } else { 567 klass_load_size = 1*BytesPerInstWord; 568 } 569 if (Assembler::is_simm13(v_off)) { 570 return klass_load_size + 571 (2*BytesPerInstWord + // ld_ptr, ld_ptr 572 NativeCall::instruction_size); // call; delay slot 573 } else { 574 return klass_load_size + 575 (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr 576 NativeCall::instruction_size); // call; delay slot 577 } 578 } 579 } 580 581 int MachCallRuntimeNode::ret_addr_offset() { 582 #ifdef _LP64 583 if (MacroAssembler::is_far_target(entry_point())) { 584 return NativeFarCall::instruction_size; 585 } else { 586 return NativeCall::instruction_size; 587 } 588 #else 589 return NativeCall::instruction_size; // call; delay slot 590 #endif 591 } 592 593 // Indicate if the safepoint node needs the polling page as an input. 
// Since Sparc does not have absolute addressing, it does.
bool SafePointNode::needs_polling_address_input() {
  return true;
}

// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint_trap();
}

#ifndef PRODUCT
// Pretty-print a breakpoint node as the SPARC trap-always mnemonic.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif

void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

// Traceable jump
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();   // fill the branch delay slot
}

// Traceable jump and set exception pc
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  // Delay slot: derive the issuing pc from the return address in O7.
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}

// Emit a single no-op instruction.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}

// Emit an illegal-instruction trap (poisons code that must never execute).
void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}


// VerifyOops helper: recover the offset of a memory operand relative to its
// oop base by asking the node itself for base and displacement.  The result
// is cross-checked against get_offset_from_base_2 by the caller.
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}


// VerifyOops helper: recover the same offset independently by inspecting the
// address input (input 2) directly, looking through a materialized AddP.
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  if (offset != Type::OffsetBot) offset += disp32;
  return offset;
}

// Build the 64-bit pattern of a replicated integer vector constant,
// returned type-punned as a jdouble so it can live in the constant table.
// NOTE(review): the pointer-cast pun below predates strict-aliasing hygiene;
// HotSpot builds with settings that tolerate it — do not imitate elsewhere.
static inline jdouble replicate_immI(int con, int count, int width) {
  // Load a constant replicated "count" times with width "width"
  assert(count*width == 8 && width <= 4, "sanity");
  int bit_width = width * 8;
  jlong val = con;
  val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
  for (int i = 0; i < count - 1; i++) {
    val |= (val << bit_width);
  }
  jdouble dval = *((jdouble*) &val); // coerce to double type
  return dval;
}

static inline jdouble replicate_immF(float con) {
  // Replicate float con 2 times and pack into vector.
  int val = *((int*)&con);
  jlong lval = val;
  lval = (lval << 32) | (lval & 0xFFFFFFFFl);
  jdouble dval = *((jdouble*) &lval); // coerce to double type
  return dval;
}

// Standard Sparc opcode form2 field breakdown (19-bit displacement variant).
static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
  f0 &= (1<<19)-1; // Mask displacement to 19 bits
  int op = (f30 << 30) |
           (f29 << 29) |
           (f25 << 25) |
           (f22 << 22) |
           (f20 << 20) |
           (f19 << 19) |
           (f0 << 0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form2 field breakdown (22-bit displacement variant).
static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
  f0 >>= 10; // Drop 10 bits
  f0 &= (1<<22)-1; // Mask displacement to 22 bits
  int op = (f30 << 30) |
           (f25 << 25) |
           (f22 << 22) |
           (f0 << 0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form3 field breakdown (register-register form).
static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
  int op = (f30 << 30) |
           (f25 << 25) |
           (f19 << 19) |
           (f14 << 14) |
           (f5 << 5) |
           (f0 << 0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form3 field breakdown (register-immediate form).
static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
  simm13 &= (1<<13)-1; // Mask to 13 bits
  int op = (f30 << 30) |
           (f25 << 25) |
           (f19 << 19) |
           (f14 << 14) |
           (1 << 13) | // bit to indicate immediate-mode
           (simm13<<0);
  cbuf.insts()->emit_int32(op);
}

// Form3 with a 10-bit immediate; still encoded via the simm13 field.
static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
  simm10 &= (1<<10)-1; // Mask to 10 bits
  emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
}

#ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg: report a
// load/store node whose ideal opcode does not match its encoding class.
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif


// Emit a SPARC form3 memory instruction: a load/store of [src1 + disp32]
// (immediate form) or [src1 + src2] (register form).  "primary" is the op3
// opcode field; "tertiary" is REGP_OP when the access may really be
// pointer-sized.  Under +VerifyOops (debug builds) it also classifies the
// node and verifies oop values moving through the instruction.
void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used together with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      // Pointer-sized access: upgrade the classification and record
      // whether the value being moved is itself a (zero-offset) oop.
      if (st_op == Op_StoreI) st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op) && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchRead && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op) && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    // Only verify the base when the second source is G0 (pure base+disp
    // addressing) and the node is not one of the known odd cases.
    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr(); // %%% oopptr?
        if (atype != NULL) {
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // Re-run both helpers so a debugger can step through the mismatch.
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                        || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Assemble the form3 instruction word: op=11 (ldst), rd, op3, rs1.
  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc << 25)
        | (primary << 19)
        | (src1_enc << 14);

  uint index = src2_enc;
  int disp = disp32;

  // Stack accesses must include the 64-bit ABI stack bias.
  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
    disp += STACK_BIAS;

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  {
    // Emit the deferred VerifyOops checks computed above (debug only).
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      // Copy the value loaded into the scratch O7 back to the real dest.
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}

// Emit a relocated call to entry_point.  If preserve_g2 is set, G2 is saved
// to L7 in the delay slot and restored after the call.
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  if (preserve_g2) __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  if (preserve_g2) __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}

//=============================================================================
// REQUIRED FUNCTIONALITY for encoding
// Intentionally empty on SPARC; hi/lo halves are handled elsewhere.
void emit_lo(CodeBuffer &cbuf, int val) { }
void emit_hi(CodeBuffer &cbuf, int val) { }


//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();

// Choose the signed offset of the constant-table base relative to the start
// of the constant section; must fit the simm13 displacement of loads.
int Compile::ConstantTable::calculate_table_base_offset() const {
  if (UseRDPCForConstantTableBase) {
    // The table base offset might be less but then it fits into
    // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
    return Assembler::min_simm13();
  } else {
    // Center the base in the table so both halves are reachable.
    int offset = -(size() / 2);
    if (!Assembler::is_simm13(offset)) {
      offset = Assembler::min_simm13();
    }
    return offset;
  }
}

// Materialize the constant-table base address into the allocated register,
// either via RDPC (pc-relative) or via a relocated SET of the absolute address.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section. This
    // assert checks for that. The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                          \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //  \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      // Adjust the RDPC result back to the chosen table base; O7 may be
      // used as a temporary if disp does not fit simm13.
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}

// Worst-case size of the code emitted above, in bytes.
uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
  if (UseRDPCForConstantTableBase) {
    // This is really the worst case but generally it's only 1 instruction.
    return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
  } else {
    return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
  }
}

#ifndef PRODUCT
// Pretty-print the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif


//=============================================================================

#ifndef PRODUCT
// Pretty-print the method prologue; mirrors MachPrologNode::emit below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  // NOTE(review): framesize is size_t, so -framesize below relies on
  // unsigned wraparound before conversion — confirm is_simm13 handles it.
  size_t framesize = C->frame_slots() << LogBytesPerInt;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    st->print_cr("! stack bang"); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print ("SAVE R_SP,-%d,R_SP",framesize);
  } else {
    st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t");
    st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t");
    st->print ("SAVE R_SP,R_G3,R_SP");
  }

}
#endif

// Emit the method prologue: optional nops, thread verification, stack-bang,
// and the register-window SAVE that allocates the frame.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_slots() << LogBytesPerInt;
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.
  // But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  // Allocate the frame with a register-window SAVE.  A large frame needs
  // the size materialized in G3 first (SETHI/ADD pair).
  // NOTE(review): framesize is size_t; -framesize relies on unsigned
  // wraparound before conversion — confirm is_simm13 handles it.
  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

// Upper bound on relocation entries emitted by the prologue.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}

//=============================================================================
#ifndef PRODUCT
// Pretty-print the method epilogue; mirrors MachEpilogNode::emit below.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if( do_polling() && ra_->C->is_method_compilation() ) {
    st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
    st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
#else
    st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
#endif
  }

  if( do_polling() )
    st->print("RET\n\t");

  st->print("RESTORE");
}
#endif

// Emit the method epilogue: optional safepoint return-poll, then RET with
// the window RESTORE in its delay slot (or a bare RESTORE when not returning).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  // If this does safepoint polling, then do it here
  if( do_polling() && ra_->C->is_method_compilation() ) {
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, L0);
    __ relocate(relocInfo::poll_return_type);
    // A protected-page load into G0: faults here are safepoint traps.
    __ ld_ptr( L0, 0, G0 );
  }

  // If this is a return, then stuff the restore in the delay slot
  if( do_polling() ) {
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

// Upper bound on relocation entries emitted by the epilogue.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Byte offset of the safepoint poll load within the epilogue (past the SETHI).
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}

//=============================================================================

// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };
static enum RC rc_class( OptoReg::Name reg ) {
  if( !OptoReg::is_valid(reg) ) return rc_bad;
  if (OptoReg::is_stack(reg)) return rc_stack;
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}

// Emit (or format, or size) one spill load/store between a register and a
// stack slot.  With a CodeBuffer it emits code; otherwise, when !do_size,
// it prints the instruction to st.  Returns the accumulated size in bytes.
static int impl_helper( const MachNode *mach, CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) {
    // Better yet would be some mechanism to handle variable-size matches correctly
    if (!Assembler::is_simm13(offset + STACK_BIAS)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
    } else {
      emit_form3_mem_reg(*cbuf, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
    }
  }
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    if( is_load ) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}

// Emit (or format) one register-to-register spill move as a form3 arith op.
// Returns the accumulated size in bytes (one instruction = 4).
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
  }
#endif
  return size+4;
}

uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
                                        PhaseRegAlloc *ra_,
                                        bool do_size,
                                        outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return size; // Self copy, no move

  // --------------------------------------
  // Check for mem-mem move. Load into unused float registers and fall into
  // the float-store case.
1341 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) { 1342 int offset = ra_->reg2offset(src_first); 1343 // Further check for aligned-adjacent pair, so we can use a double load 1344 if( (src_first&1)==0 && src_first+1 == src_second ) { 1345 src_second = OptoReg::Name(R_F31_num); 1346 src_second_rc = rc_float; 1347 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st); 1348 } else { 1349 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st); 1350 } 1351 src_first = OptoReg::Name(R_F30_num); 1352 src_first_rc = rc_float; 1353 } 1354 1355 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { 1356 int offset = ra_->reg2offset(src_second); 1357 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st); 1358 src_second = OptoReg::Name(R_F31_num); 1359 src_second_rc = rc_float; 1360 } 1361 1362 // -------------------------------------- 1363 // Check for float->int copy; requires a trip through memory 1364 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) { 1365 int offset = frame::register_save_words*wordSize; 1366 if (cbuf) { 1367 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 ); 1368 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1369 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1370 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 ); 1371 } 1372 #ifndef PRODUCT 1373 else if (!do_size) { 1374 if (size != 0) st->print("\n\t"); 1375 st->print( "SUB R_SP,16,R_SP\n"); 1376 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1377 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1378 st->print("\tADD R_SP,16,R_SP\n"); 1379 } 1380 #endif 1381 size += 
16; 1382 } 1383 1384 // Check for float->int copy on T4 1385 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) { 1386 // Further check for aligned-adjacent pair, so we can use a double move 1387 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1388 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st); 1389 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st); 1390 } 1391 // Check for int->float copy on T4 1392 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) { 1393 // Further check for aligned-adjacent pair, so we can use a double move 1394 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1395 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st); 1396 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st); 1397 } 1398 1399 // -------------------------------------- 1400 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations. 1401 // In such cases, I have to do the big-endian swap. For aligned targets, the 1402 // hardware does the flop for me. Doubles are always aligned, so no problem 1403 // there. Misaligned sources only come from native-long-returns (handled 1404 // special below). 1405 #ifndef _LP64 1406 if( src_first_rc == rc_int && // source is already big-endian 1407 src_second_rc != rc_bad && // 64-bit move 1408 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst 1409 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" ); 1410 // Do the big-endian flop. 
1411 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ; 1412 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc; 1413 } 1414 #endif 1415 1416 // -------------------------------------- 1417 // Check for integer reg-reg copy 1418 if( src_first_rc == rc_int && dst_first_rc == rc_int ) { 1419 #ifndef _LP64 1420 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case 1421 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1422 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1423 // operand contains the least significant word of the 64-bit value and vice versa. 1424 OptoReg::Name tmp = OptoReg::Name(R_O7_num); 1425 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" ); 1426 // Shift O0 left in-place, zero-extend O1, then OR them into the dst 1427 if( cbuf ) { 1428 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 ); 1429 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 ); 1430 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] ); 1431 #ifndef PRODUCT 1432 } else if( !do_size ) { 1433 if( size != 0 ) st->print("\n\t"); 1434 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp)); 1435 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second)); 1436 st->print("OR R_%s,R_%s,R_%s\t! 
spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first)); 1437 #endif 1438 } 1439 return size+12; 1440 } 1441 else if( dst_first == R_I0_num && dst_second == R_I1_num ) { 1442 // returning a long value in I0/I1 1443 // a SpillCopy must be able to target a return instruction's reg_class 1444 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1445 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1446 // operand contains the least significant word of the 64-bit value and vice versa. 1447 OptoReg::Name tdest = dst_first; 1448 1449 if (src_first == dst_first) { 1450 tdest = OptoReg::Name(R_O7_num); 1451 size += 4; 1452 } 1453 1454 if( cbuf ) { 1455 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg"); 1456 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1 1457 // ShrL_reg_imm6 1458 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 ); 1459 // ShrR_reg_imm6 src, 0, dst 1460 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 ); 1461 if (tdest != dst_first) { 1462 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] ); 1463 } 1464 } 1465 #ifndef PRODUCT 1466 else if( !do_size ) { 1467 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!! 1468 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest)); 1469 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second)); 1470 if (tdest != dst_first) { 1471 st->print("MOV R_%s,R_%s\t! 
spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first)); 1472 } 1473 } 1474 #endif // PRODUCT 1475 return size+8; 1476 } 1477 #endif // !_LP64 1478 // Else normal reg-reg copy 1479 assert( src_second != dst_first, "smashed second before evacuating it" ); 1480 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st); 1481 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" ); 1482 // This moves an aligned adjacent pair. 1483 // See if we are done. 1484 if( src_first+1 == src_second && dst_first+1 == dst_second ) 1485 return size; 1486 } 1487 1488 // Check for integer store 1489 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) { 1490 int offset = ra_->reg2offset(dst_first); 1491 // Further check for aligned-adjacent pair, so we can use a double store 1492 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1493 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st); 1494 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st); 1495 } 1496 1497 // Check for integer load 1498 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) { 1499 int offset = ra_->reg2offset(src_first); 1500 // Further check for aligned-adjacent pair, so we can use a double load 1501 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1502 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st); 1503 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1504 } 1505 1506 // Check for float reg-reg copy 1507 if( src_first_rc == rc_float && dst_first_rc == rc_float ) { 1508 // Further check for aligned-adjacent pair, so we can use a double move 1509 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && 
dst_first+1 == dst_second ) 1510 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st); 1511 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st); 1512 } 1513 1514 // Check for float store 1515 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) { 1516 int offset = ra_->reg2offset(dst_first); 1517 // Further check for aligned-adjacent pair, so we can use a double store 1518 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1519 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st); 1520 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1521 } 1522 1523 // Check for float load 1524 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) { 1525 int offset = ra_->reg2offset(src_first); 1526 // Further check for aligned-adjacent pair, so we can use a double load 1527 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1528 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st); 1529 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st); 1530 } 1531 1532 // -------------------------------------------------------------------- 1533 // Check for hi bits still needing moving. Only happens for misaligned 1534 // arguments to native calls. 1535 if( src_second == dst_second ) 1536 return size; // Self copy; no move 1537 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" ); 1538 1539 #ifndef _LP64 1540 // In the LP64 build, all registers can be moved as aligned/adjacent 1541 // pairs, so there's never any need to move the high bits separately. 
1542 // The 32-bit builds have to deal with the 32-bit ABI which can force 1543 // all sorts of silly alignment problems. 1544 1545 // Check for integer reg-reg copy. Hi bits are stuck up in the top 1546 // 32-bits of a 64-bit register, but are needed in low bits of another 1547 // register (else it's a hi-bits-to-hi-bits copy which should have 1548 // happened already as part of a 64-bit move) 1549 if( src_second_rc == rc_int && dst_second_rc == rc_int ) { 1550 assert( (src_second&1)==1, "its the evil O0/O1 native return case" ); 1551 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" ); 1552 // Shift src_second down to dst_second's low bits. 1553 if( cbuf ) { 1554 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1555 #ifndef PRODUCT 1556 } else if( !do_size ) { 1557 if( size != 0 ) st->print("\n\t"); 1558 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second)); 1559 #endif 1560 } 1561 return size+4; 1562 } 1563 1564 // Check for high word integer store. Must down-shift the hi bits 1565 // into a temp register, then fall into the case of storing int bits. 1566 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) { 1567 // Shift src_second down to dst_second's low bits. 1568 if( cbuf ) { 1569 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1570 #ifndef PRODUCT 1571 } else if( !do_size ) { 1572 if( size != 0 ) st->print("\n\t"); 1573 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num)); 1574 #endif 1575 } 1576 size+=4; 1577 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num! 
    }

    // Check for high word integer load: hi bits come from the stack.
    if( dst_second_rc == rc_int && src_second_rc == rc_stack )
      return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st);

    // Check for high word integer store: hi bits go to the stack.
    if( src_second_rc == rc_int && dst_second_rc == rc_stack )
      return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st);

    // Check for high word float store.
    if( src_second_rc == rc_float && dst_second_rc == rc_stack )
      return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st);

#endif // !_LP64

  // No remaining case is handled; any spill-copy shape reaching here is a bug.
  Unimplemented();
}

#ifndef PRODUCT
// Pretty-print the spill copy: runs implementation() in format-only mode
// (NULL code buffer, do_size=false) so it prints to 'st' without emitting.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill-copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size in bytes of the emitted spill copy: runs implementation() in
// size-only mode (do_size=true) and returns the accumulated byte count.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}

//=============================================================================
#ifndef PRODUCT
// Print the nop padding; each nop is 4 bytes on SPARC.
void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif

// Emit '_count' nop instructions used as alignment padding.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
  MacroAssembler _masm(&cbuf);
  for(int i = 0; i < _count; i += 1) {
    __ nop();
  }
}

// Fixed size: 4 bytes per nop instruction.
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}


//=============================================================================
#ifndef PRODUCT
// Print the box-lock address computation (SP-relative lea into a register).
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
}
#endif

// Materialize the stack address of the lock slot into the destination
// register.  Uses O7 as a scratch register when the biased offset does not
// fit in a 13-bit immediate.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm13(offset)) {
    __ add(SP, offset, reg_to_register_object(reg));
  } else {
    // Offset too large for a single add immediate: load it into O7 first.
    __ set(offset, O7);
    __ add(SP, O7, reg_to_register_object(reg));
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}

//=============================================================================
#ifndef PRODUCT
// Print the unverified entry point (inline cache check) sequence.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedKlassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    st->print_cr("\tSLL R_G5,3,R_G5");
    if (Universe::narrow_klass_base() != NULL)
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
  st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
#endif

// Emit the unverified entry point: load the receiver's klass and trap to the
// inline-cache miss handler if it does not match the expected klass in G5.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}


//=============================================================================

// Worst-case size of the exception handler stub, in bytes.
uint size_exception_handler() {
  if (TraceJumps) {
    return (400); // just a guess
  }
  return ( NativeJump::instruction_size ); // sethi;jmp;nop
}

// Worst-case size of the deopt handler stub, in bytes.
uint size_deopt_handler() {
  if (TraceJumps) {
    return (400); // just a guess
  }
  return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
}

// Emit exception handler code.
1718 int emit_exception_handler(CodeBuffer& cbuf) { 1719 Register temp_reg = G3; 1720 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point()); 1721 MacroAssembler _masm(&cbuf); 1722 1723 address base = 1724 __ start_a_stub(size_exception_handler()); 1725 if (base == NULL) return 0; // CodeBuffer::expand failed 1726 1727 int offset = __ offset(); 1728 1729 __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp 1730 __ delayed()->nop(); 1731 1732 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 1733 1734 __ end_a_stub(); 1735 1736 return offset; 1737 } 1738 1739 int emit_deopt_handler(CodeBuffer& cbuf) { 1740 // Can't use any of the current frame's registers as we may have deopted 1741 // at a poll and everything (including G3) can be live. 1742 Register temp_reg = L0; 1743 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); 1744 MacroAssembler _masm(&cbuf); 1745 1746 address base = 1747 __ start_a_stub(size_deopt_handler()); 1748 if (base == NULL) return 0; // CodeBuffer::expand failed 1749 1750 int offset = __ offset(); 1751 __ save_frame(0); 1752 __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp 1753 __ delayed()->restore(); 1754 1755 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); 1756 1757 __ end_a_stub(); 1758 return offset; 1759 1760 } 1761 1762 // Given a register encoding, produce a Integer Register object 1763 static Register reg_to_register_object(int register_encoding) { 1764 assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding"); 1765 return as_Register(register_encoding); 1766 } 1767 1768 // Given a register encoding, produce a single-precision Float Register object 1769 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) { 1770 assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding"); 1771 return as_SingleFloatRegister(register_encoding); 1772 } 1773 1774 // 
Given a register encoding, produce a double-precision Float Register object 1775 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) { 1776 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding"); 1777 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding"); 1778 return as_DoubleFloatRegister(register_encoding); 1779 } 1780 1781 const bool Matcher::match_rule_supported(int opcode) { 1782 if (!has_match_rule(opcode)) 1783 return false; 1784 1785 switch (opcode) { 1786 case Op_CountLeadingZerosI: 1787 case Op_CountLeadingZerosL: 1788 case Op_CountTrailingZerosI: 1789 case Op_CountTrailingZerosL: 1790 case Op_PopCountI: 1791 case Op_PopCountL: 1792 if (!UsePopCountInstruction) 1793 return false; 1794 case Op_CompareAndSwapL: 1795 #ifdef _LP64 1796 case Op_CompareAndSwapP: 1797 #endif 1798 if (!VM_Version::supports_cx8()) 1799 return false; 1800 break; 1801 } 1802 1803 return true; // Per default match rules are supported. 1804 } 1805 1806 int Matcher::regnum_to_fpu_offset(int regnum) { 1807 return regnum - 32; // The FP registers are in the second chunk 1808 } 1809 1810 #ifdef ASSERT 1811 address last_rethrow = NULL; // debugging aid for Rethrow encoding 1812 #endif 1813 1814 // Vector width in bytes 1815 const int Matcher::vector_width_in_bytes(BasicType bt) { 1816 assert(MaxVectorSize == 8, ""); 1817 return 8; 1818 } 1819 1820 // Vector ideal reg 1821 const int Matcher::vector_ideal_reg(int size) { 1822 assert(MaxVectorSize == 8, ""); 1823 return Op_RegD; 1824 } 1825 1826 const int Matcher::vector_shift_count_ideal_reg(int size) { 1827 fatal("vector shift is not supported"); 1828 return Node::NotAMachineReg; 1829 } 1830 1831 // Limits on vector size (number of elements) loaded into vector. 
// Max number of vector elements of the given element type (width / element size).
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Minimum vector size equals the maximum: only one vector width is supported.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// SPARC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// USII supports fxtof through the whole range of number, USIII doesn't
const bool Matcher::convL2FSupported(void) {
  return VM_Version::has_fast_fxtof();
}

// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Don't need to adjust the offset.
  // Short branches (CBCOND) take a signed 12-bit displacement.
  return UseCBCond && Assembler::is_simm12(offset);
}

// Is this 64-bit constant cheap enough to materialize in a single StoreL?
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Depends on optimizations in MacroAssembler::setx.
  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);   // low 32 bits (cast truncates; the mask is a no-op)
  return (hi == 0) || (hi == -1) || (lo == 0);
}

// No scaling for the parameter of the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on T4 and on SPARC64.
const int Matcher::float_cmove_cost() {
  return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
}

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedKlassPointers, "only for compressed klass code");
  return false;
}

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif

// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif

// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}

// Any register a Java argument can live in may also hold a spilled argument.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than a code which use multiply.
  return VM_Version::has_fast_idiv();
}

// Register for DIVI projection of divmodI
// (divmodI is not implemented on SPARC, hence ShouldNotReachHere.)
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask();
}

%}


// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
#ifdef _LP64
#define immX immL
#define immX13 immL13
#define immX13m7 immL13m7
#define iRegX iRegL
#define g1RegX g1RegL
#else
#define immX immI
#define immX13 immI13
#define immX13m7 immI13m7
#define iRegX iRegI
#define g1RegX g1RegI
#endif

//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams.  Encoding classes are parameterized macros used by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction.  Operands specify their base encoding interface with the
// interface keyword.  There are currently supported four interfaces,
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
// operand to generate a function which returns its register number when
// queried.  CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried.  MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried.  COND_INTER causes an operand to generate six functions which
// return the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
//
// Instructions specify two basic values for encoding.  Again, a function
// is available to check if the constant displacement is an oop. They use the
// ins_encode keyword to specify their encoding classes (which must be
// a sequence of enc_class names, and their parameters, specified in
// the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode.  Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
2064 encode %{ 2065 enc_class enc_untested %{ 2066 #ifdef ASSERT 2067 MacroAssembler _masm(&cbuf); 2068 __ untested("encoding"); 2069 #endif 2070 %} 2071 2072 enc_class form3_mem_reg( memory mem, iRegI dst ) %{ 2073 emit_form3_mem_reg(cbuf, this, $primary, $tertiary, 2074 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2075 %} 2076 2077 enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{ 2078 emit_form3_mem_reg(cbuf, this, $primary, -1, 2079 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2080 %} 2081 2082 enc_class form3_mem_prefetch_read( memory mem ) %{ 2083 emit_form3_mem_reg(cbuf, this, $primary, -1, 2084 $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/); 2085 %} 2086 2087 enc_class form3_mem_prefetch_write( memory mem ) %{ 2088 emit_form3_mem_reg(cbuf, this, $primary, -1, 2089 $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/); 2090 %} 2091 2092 enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{ 2093 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2094 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2095 guarantee($mem$$index == R_G0_enc, "double index?"); 2096 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc ); 2097 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg ); 2098 emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 ); 2099 emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc ); 2100 %} 2101 2102 enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{ 2103 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2104 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2105 guarantee($mem$$index == R_G0_enc, "double index?"); 2106 // Load long with 2 instructions 2107 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, 
$reg$$reg+0 ); 2108 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 ); 2109 %} 2110 2111 //%%% form3_mem_plus_4_reg is a hack--get rid of it 2112 enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{ 2113 guarantee($mem$$disp, "cannot offset a reg-reg operand by 4"); 2114 emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg); 2115 %} 2116 2117 enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{ 2118 // Encode a reg-reg copy. If it is useless, then empty encoding. 2119 if( $rs2$$reg != $rd$$reg ) 2120 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg ); 2121 %} 2122 2123 // Target lo half of long 2124 enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{ 2125 // Encode a reg-reg copy. If it is useless, then empty encoding. 2126 if( $rs2$$reg != LONG_LO_REG($rd$$reg) ) 2127 emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg ); 2128 %} 2129 2130 // Source lo half of long 2131 enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{ 2132 // Encode a reg-reg copy. If it is useless, then empty encoding. 2133 if( LONG_LO_REG($rs2$$reg) != $rd$$reg ) 2134 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) ); 2135 %} 2136 2137 // Target hi half of long 2138 enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{ 2139 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 ); 2140 %} 2141 2142 // Source lo half of long, and leave it sign extended. 2143 enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{ 2144 // Sign extend low half 2145 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 ); 2146 %} 2147 2148 // Source hi half of long, and leave it sign extended. 
2149 enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{ 2150 // Shift high half to low half 2151 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 ); 2152 %} 2153 2154 // Source hi half of long 2155 enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{ 2156 // Encode a reg-reg copy. If it is useless, then empty encoding. 2157 if( LONG_HI_REG($rs2$$reg) != $rd$$reg ) 2158 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) ); 2159 %} 2160 2161 enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{ 2162 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg ); 2163 %} 2164 2165 enc_class enc_to_bool( iRegI src, iRegI dst ) %{ 2166 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg ); 2167 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 ); 2168 %} 2169 2170 enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{ 2171 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg ); 2172 // clear if nothing else is happening 2173 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 ); 2174 // blt,a,pn done 2175 emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 ); 2176 // mov dst,-1 in delay slot 2177 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 ); 2178 %} 2179 2180 enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{ 2181 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F ); 2182 %} 2183 2184 enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{ 2185 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 ); 2186 %} 2187 2188 enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{ 2189 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 
0x80, $rs2$$reg );
  %}

  // Format-3 arithmetic with immediate: rd = rs1 OP simm13.  The op/op3
  // opcode fields come from the matching instruct's primary/secondary.
  enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
  %}

  // Materialize the return pc in O1: O1 = O7 + frame::pc_return_offset.
  enc_class move_return_pc_to_o1() %{
    emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
  %}

#ifdef _LP64
  /* %%% merge with enc_to_bool */
  // Pointer-to-boolean: MOVR on rc_nz writes 1 into dst when src != 0;
  // dst is left untouched when src == 0.
  // NOTE(review): assumes dst is pre-set to 0 by the matching instruct -- verify.
  enc_class enc_convP2B( iRegI dst, iRegP src ) %{
    MacroAssembler _masm(&cbuf);

    Register src_reg = reg_to_register_object($src$$reg);
    Register dst_reg = reg_to_register_object($dst$$reg);
    __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
  %}
#endif

  // p = (p < q) ? (p - q) + y : (p - q); the subcc sets icc, the add runs
  // unconditionally into tmp, and the conditional move commits tmp only on less.
  enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
    // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
    MacroAssembler _masm(&cbuf);

    Register p_reg   = reg_to_register_object($p$$reg);
    Register q_reg   = reg_to_register_object($q$$reg);
    Register y_reg   = reg_to_register_object($y$$reg);
    Register tmp_reg = reg_to_register_object($tmp$$reg);

    __ subcc( p_reg, q_reg,   p_reg );
    __ add  ( p_reg, y_reg, tmp_reg );
    __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );
  %}

  // Double -> int conversion helper.  On NaN input the fcmp/branch falls
  // through and dst is forced to zero via fitos + fsubs (dst - dst).
  enc_class form_d2i_helper(regD src, regF dst) %{
    // fcmp %fcc0,$src,$src
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fdtoi $src,$dst
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
    // fitos $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
    // carry on here...
  %}

  // Double -> long conversion helper; same NaN-to-zero scheme as d2i,
  // using the 64-bit fdtox/fxtod/fsubd forms.
  enc_class form_d2l_helper(regD src, regD dst) %{
    // fcmp %fcc0,$src,$src  check for NAN
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fdtox $src,$dst  convert in delay slot
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
    // fxtod $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
    // carry on here...
  %}

  // Float -> int conversion helper; NaN input forces dst to zero.
  enc_class form_f2i_helper(regF src, regF dst) %{
    // fcmps %fcc0,$src,$src
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fstoi $src,$dst
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
    // fitos $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
    // carry on here...
  %}

  // Float -> long conversion helper; NaN input forces dst to zero.
  enc_class form_f2l_helper(regF src, regD dst) %{
    // fcmps %fcc0,$src,$src
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fstox $src,$dst
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
    // fxtod $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
    // carry on here...
  %}

  // Single-source FP ops (opf in tertiary): rd = OPF(rs2), for all
  // combinations of single/double source and destination.
  enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}

  // Uses the low (odd) half of the double source: rs2+1.
  enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}

  // Move hi/lo halves of a double pair separately.
  enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}

  // Two-source FP ops: rd = rs1 OPF rs2.
  enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
  %}

  enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2,
regD rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
  %}

  // FP compares: set the float condition-code register fcc from rs1 ? rs2.
  enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
    emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
  %}

  enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
    emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
  %}

  // Int -> float conversion (fpop1; specific opf comes from secondary).
  enc_class form3_convI2F(regF rs2, regF rd) %{
    emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
  %}

  // Encoding class for traceable jumps
  enc_class form_jmpl(g3RegP dest) %{
    emit_jmpl(cbuf, $dest$$reg);
  %}

  enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
    emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
  %}

  enc_class form2_nop() %{
    emit_nop(cbuf);
  %}

  enc_class form2_illtrap() %{
    emit_illtrap(cbuf);
  %}


  // Compare longs and convert into -1, 0, 1.
  // dst = sign of (src1 - src2) as -1/0/1; branches on xcc with annulled
  // delay-slot moves so only the selected constant is committed.
  enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
    // CMP $src1,$src2
    emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
    // blt,a,pn done
    emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less   , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
    // mov dst,-1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
    // bgt,a,pn done
    emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
    // mov dst,1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
    // CLR $dst
    emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
  %}

  // Call the shared partial-subtype-check stub (delay slot nop'd).
  enc_class enc_PartialSubtypeCheck() %{
    MacroAssembler _masm(&cbuf);
    __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
    __ delayed()->nop();
  %}

  // Conditional branch on icc; backward branches are predicted taken.
  enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
    MacroAssembler _masm(&cbuf);
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}

  // Branch on register contents (BPr); backward branches predicted taken.
  enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
    MacroAssembler _masm(&cbuf);
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
    __ delayed()->nop();
  %}

  // MOVcc register form on icc/xcc; pcc constant supplies cc1:cc0.
  enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($cmp$$cmpcode << 14) |
             (0 << 13) |                    // select register move
             ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc' or 'xcc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // MOVcc immediate form: src is an 11-bit signed immediate.
  enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
    int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select immediate move
             ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc'
             (simm11 << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // MOVcc register form keyed on a float condition code fcc0-fcc3.
  enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (0 << 18) |                    // cc2 bit for 'fccX'
             ($cmp$$cmpcode << 14) |
             (0 << 13) |                    // select register move
             ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // MOVcc immediate form keyed on a float condition code fcc0-fcc3.
  enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
    int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (0 << 18) |                    // cc2 bit for 'fccX'
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select immediate move
             ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
             (simm11 << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // Conditional FP register move (FMOVcc, fpop2) selected by an integer
  // condition code; $primary picks single/double/quad.
  enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::fpop2_op3 << 19) |
             (0 << 18) |
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select register move
             ($pcc$$constant << 11) |       // cc1-cc0 bits for 'icc' or 'xcc'
             ($primary << 5) |              // select single, double or quad
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // Conditional FP register move selected by a float condition code fccX.
  enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::fpop2_op3 << 19) |
             (0 << 18) |
             ($cmp$$cmpcode << 14) |
             ($fcc$$reg << 11) |            // cc2-cc0 bits for 'fccX'
             ($primary << 5) |              // select single, double or quad
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // Used by the MIN/MAX encodings.  Same as a CMOV, but
  // the condition comes from opcode-field instead of an argument.
  // MOVcc for int MIN/MAX; the condition is the instruct's $primary opcode.
  enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($primary << 14) |
             (0 << 13) |                    // select register move
             (0 << 11) |                    // cc1, cc0 bits for 'icc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // MOVcc for long MIN/MAX on xcc.
  enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (6 << 16) |                    // cc2 bit for 'xcc'
             // NOTE(review): 6 << 16 sets bits 18 and 17; bit 17 overlaps the
             // cond field supplied by $primary << 14 -- confirm against the
             // SPARC V9 MOVcc instruction format.
             ($primary << 14) |
             (0 << 13) |                    // select register move
             (0 << 11) |                    // cc1, cc0 bits for 'icc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // rd = simm13, via OR %g0,simm13,rd.
  enc_class Set13( immI13 src, iRegI rd ) %{
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
  %}

  // rd = high 22 bits of src, via SETHI.
  enc_class SetHi22( immI src, iRegI rd ) %{
    emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
  %}

  // rd = full 32-bit constant (MacroAssembler picks the shortest sequence).
  enc_class Set32( immI src, iRegI rd ) %{
    MacroAssembler _masm(&cbuf);
    __ set($src$$constant, reg_to_register_object($rd$$reg));
  %}

  // Debug-only epilog: when VerifyStackAtCalls, trap if SP+framesize != FP.
  enc_class call_epilog %{
    if( VerifyStackAtCalls ) {
      MacroAssembler _masm(&cbuf);
      int framesize = ra_->C->frame_slots() << LogBytesPerInt;
      Register temp_reg = G3;
      __ add(SP, framesize, temp_reg);
      __ cmp(temp_reg, FP);
      __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
    }
  %}

  // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
  // to G1 so the register allocator will not have to deal with the misaligned register
  // pair.
  // 32-bit VM only: pack the O0:O1 long return into G1 (O0<<32 | zext(O1)).
  enc_class adjust_long_from_native_call %{
#ifndef _LP64
    if (returns_long()) {
      // sllx  O0,32,O0
      emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
      // srl   O1,0,O1
      emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
      // or    O0,O1,G1
      emit3       ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
    }
#endif
  %}

  enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
    // CALL directly to the runtime
    // The user of this is responsible for ensuring that R_L7 is empty (killed).
    emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
                    /*preserve_g2=*/true);
  %}

  // Save SP in L7 around a method-handle call site.
  enc_class preserve_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(SP, L7_mh_SP_save);
  %}

  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(L7_mh_SP_save, SP);
  %}

  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    if (!_method) {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
    } else if (_optimized_virtual) {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
    } else {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
    }
    if (_method) {  // Emit stub for static call.
      CompiledStaticCall::emit_to_interp_stub(cbuf);
    }
  %}

  // Virtual/interface dispatch: inline-cache call when no vtable index,
  // otherwise an inline vtable lookup and indirect jump.
  enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ set_inst_mark();
    int vtable_index = this->_vtable_index;
    // MachCallDynamicJavaNode::ret_addr_offset uses this same test
    if (vtable_index < 0) {
      // must be invalid_vtable_index, not nonvirtual_vtable_index
      assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
      Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
      assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
      assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
      __ ic_call((address)$meth$$method);
    } else {
      assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
      // Just go thru the vtable
      // get receiver klass (receiver already checked for non-null)
      // If we end up going thru a c2i adapter interpreter expects method in G5
      int off = __ offset();
      __ load_klass(O0, G3_scratch);
      int klass_load_size;
      if (UseCompressedKlassPointers) {
        assert(Universe::heap() != NULL, "java heap should be initialized");
        if (Universe::narrow_klass_base() == NULL)
          klass_load_size = 2*BytesPerInstWord;  // see MacroAssembler::load_klass()
        else
          klass_load_size = 3*BytesPerInstWord;
      } else {
        klass_load_size = 1*BytesPerInstWord;
      }
      int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
      int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
      if (Assembler::is_simm13(v_off)) {
        __ ld_ptr(G3, v_off, G5_method);
      } else {
        // Generate 2 instructions
        __ Assembler::sethi(v_off & ~0x3ff, G5_method);
        __ or3(G5_method, v_off & 0x3ff, G5_method);
        // ld_ptr, set_hi, set
        assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
               "Unexpected instruction size(s)");
        __ ld_ptr(G3, G5_method, G5_method);
      }
      // NOTE: for vtable dispatches, the vtable entry will never be null.
      // However it may very well end up in handle_wrong_method if the
      // method is abstract for the particular class.
      __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
      // jump to target (either compiled code or c2iadapter)
      __ jmpl(G3_scratch, G0, O7);
      __ delayed()->nop();
    }
  %}

  enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
    MacroAssembler _masm(&cbuf);

    Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    Register temp_reg  = G3;   // caller must kill G3!  We cannot reuse G5_ic_reg here because
                               // we might be calling a C2I adapter which needs it.

    assert(temp_reg != G5_ic_reg, "conflicting registers");
    // Load nmethod
    __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);

    // CALL to compiled java, indirect the contents of G3
    __ set_inst_mark();
    __ callr(temp_reg, G0);
    __ delayed()->nop();
  %}

  // 32-bit signed divide via 64-bit sdivx after sign-extending both inputs.
  enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor  = reg_to_register_object($src2$$reg);
    Register Rresult   = reg_to_register_object($dst$$reg);

    __ sra(Rdivisor, 0, Rdivisor);
    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, Rdivisor, Rresult);
  %}

  enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor        = $imm$$constant;
    Register Rresult   = reg_to_register_object($dst$$reg);

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rresult);
  %}

  // High 32 bits of a 32x32->64 multiply: sign-extend, mulx, shift down 32.
  enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    Register Rdst  = reg_to_register_object($dst$$reg);

    __ sra( Rsrc1, 0, Rsrc1 );
    __ sra( Rsrc2, 0, Rsrc2 );
    __ mulx( Rsrc1, Rsrc2, Rdst );
    __ srlx( Rdst, 32, Rdst );
  %}

  // 32-bit signed remainder: result = dividend - (dividend/divisor)*divisor.
  enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor  = reg_to_register_object($src2$$reg);
    Register Rresult   = reg_to_register_object($dst$$reg);
    Register Rscratch  = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");
    assert(Rdivisor  != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sra(Rdivisor, 0, Rdivisor);
    __ sdivx(Rdividend, Rdivisor, Rscratch);
    __ mulx(Rscratch, Rdivisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
  %}

  enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor        = $imm$$constant;
    Register Rresult   = reg_to_register_object($dst$$reg);
    Register Rscratch  = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rscratch);
    __ mulx(Rscratch, divisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
  %}

  // Single-precision FP absolute value.
  enc_class fabss (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  enc_class
fabsd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Double-precision FP negate.
  enc_class fnegd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Single-precision register move.
  // NOTE(review): parameters are declared dflt_reg but the bodies use the
  // single-float register objects -- matches upstream; verify if touched.
  enc_class fmovs (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  enc_class fmovd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Inline fast-path monitor enter; falls back to runtime on contention.
  enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop     = reg_to_register_object($oop$$reg);
    Register Rbox     = reg_to_register_object($box$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark    = reg_to_register_object($scratch2$$reg);

    assert(Roop != Rscratch, "");
    assert(Roop != Rmark, "");
    assert(Rbox != Rscratch, "");
    assert(Rbox != Rmark, "");

    __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
  %}

  // Inline fast-path monitor exit.
  enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop     = reg_to_register_object($oop$$reg);
    Register Rbox     = reg_to_register_object($box$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark    = reg_to_register_object($scratch2$$reg);

    assert(Roop != Rscratch, "");
    assert(Roop != Rmark, "");
    assert(Rbox != Rscratch, "");
    assert(Rbox != Rmark, "");

    __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
  %}

  // Pointer compare-and-swap; leaves the comparison result in the flags.
  enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
    MacroAssembler _masm(&cbuf);
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    // casx_under_lock picks 1 of 3 encodings:
    // For 32-bit pointers you get a 32-bit CAS
    // For 64-bit pointers you get a 64-bit CASX
    __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
    __ cmp( Rold, Rnew );
  %}

  // 64-bit compare-and-swap through O7 so the new value is preserved.
  enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ casx(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
  %}

  // raw int cas, used for compareAndSwap
  enc_class
enc_casi( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ cas(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
  %}

  // res = (xcc notEqual) ? res : 0, after res is preset to 1.
  enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
  %}

  // Same as above but on the 32-bit condition codes (icc).
  enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
  %}

  // FP three-way compare: dst = -1/0/1; $primary selects single vs double.
  enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
                                   : reg_to_DoubleFloatRegister_object($src1$$reg);
    FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
                                   : reg_to_DoubleFloatRegister_object($src2$$reg);

    // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
    __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
  %}


  // String.compareTo intrinsic: lexicographic compare of two char arrays,
  // result < 0 / == 0 / > 0 like String.compareTo.
  enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
    Label Ldone, Lloop;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = reg_to_register_object($str1$$reg);
    Register   str2_reg = reg_to_register_object($str2$$reg);
    Register   cnt1_reg = reg_to_register_object($cnt1$$reg);
    Register   cnt2_reg = reg_to_register_object($cnt2$$reg);
    Register result_reg = reg_to_register_object($result$$reg);

    assert(result_reg != str1_reg &&
           result_reg != str2_reg &&
           result_reg != cnt1_reg &&
           result_reg != cnt2_reg ,
           "need different registers");

    // Compute the minimum of the string lengths(str1_reg) and the
    // difference of the string lengths (stack)

    // See if the lengths are different, and calculate min in str1_reg.
    // Stash diff in O7 in case we need it for a tie-breaker.
    Label Lskip;
    __ subcc(cnt1_reg, cnt2_reg, O7);
    __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    // cnt2 is shorter, so use its count:
    __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
    __ bind(Lskip);

    // reallocate cnt1_reg, cnt2_reg, result_reg
    // Note:  limit_reg holds the string length pre-scaled by 2
    Register limit_reg = cnt1_reg;
    Register  chr2_reg = cnt2_reg;
    Register  chr1_reg = result_reg;
    // str{12} are the base pointers

    // Is the minimum length zero?
    __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result_reg);  // result is difference in lengths

    // Load first characters
    __ lduh(str1_reg, 0, chr1_reg);
    __ lduh(str2_reg, 0, chr2_reg);

    // Compare first characters
    __ subcc(chr1_reg, chr2_reg, chr1_reg);
    __ br(Assembler::notZero, false, Assembler::pt, Ldone);
    assert(chr1_reg == result_reg, "result must be pre-placed");
    __ delayed()->nop();

    {
      // Check after comparing first character to see if strings are equivalent
      Label LSkip2;
      // Check if the strings start at same location
      __ cmp(str1_reg, str2_reg);
      __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
      __ delayed()->nop();

      // Check if the length difference is zero (in O7)
      __ cmp(G0, O7);
      __ br(Assembler::equal, true, Assembler::pn, Ldone);
      __ delayed()->mov(G0, result_reg);  // result is zero

      // Strings might not be equal
      __ bind(LSkip2);
    }

    __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result_reg);  // result is difference in lengths

    // Shift str1_reg and str2_reg to the end of the arrays, negate limit
    __ add(str1_reg, limit_reg, str1_reg);
    __ add(str2_reg, limit_reg, str2_reg);
    __ neg(chr1_reg, limit_reg);  // limit = -(limit-2)

    // Compare the rest of the characters
    __ lduh(str1_reg, limit_reg, chr1_reg);
    __ bind(Lloop);
    // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
    __ lduh(str2_reg, limit_reg, chr2_reg);
    __ subcc(chr1_reg, chr2_reg, chr1_reg);
    __ br(Assembler::notZero, false, Assembler::pt, Ldone);
    assert(chr1_reg == result_reg, "result must be pre-placed");
    __ delayed()->inccc(limit_reg, sizeof(jchar));
    // annul LDUH if branch is not taken to prevent access past end of string
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

    // If strings are equal up to min length, return the length difference.
    __ mov(O7, result_reg);

    // Otherwise, return the difference between the first mismatched chars.
    __ bind(Ldone);
  %}

  // String.equals intrinsic: word compare when 4-byte aligned, else char
  // by char; result is 0/1.
  enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
    Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = reg_to_register_object($str1$$reg);
    Register   str2_reg = reg_to_register_object($str2$$reg);
    Register    cnt_reg = reg_to_register_object($cnt$$reg);
    Register   tmp1_reg = O7;
    Register result_reg = reg_to_register_object($result$$reg);

    assert(result_reg != str1_reg &&
           result_reg != str2_reg &&
           result_reg != cnt_reg  &&
           result_reg != tmp1_reg ,
           "need different registers");

    __ cmp(str1_reg, str2_reg); //same char[] ?
    __ brx(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->add(G0, 1, result_reg);

    __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
    __ delayed()->add(G0, 1, result_reg); // count == 0

    //rename registers
    Register limit_reg = cnt_reg;
    Register  chr1_reg = result_reg;
    Register  chr2_reg = tmp1_reg;

    //check for alignment and position the pointers to the ends
    __ or3(str1_reg, str2_reg, chr1_reg);
    __ andcc(chr1_reg, 0x3, chr1_reg);
    // notZero means at least one not 4-byte aligned.
    // We could optimize the case when both arrays are not aligned
    // but it is not frequent case and it requires additional checks.
2971 __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare 2972 __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count 2973 2974 // Compare char[] arrays aligned to 4 bytes. 2975 __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg, 2976 chr1_reg, chr2_reg, Ldone); 2977 __ ba(Ldone); 2978 __ delayed()->add(G0, 1, result_reg); 2979 2980 // char by char compare 2981 __ bind(Lchar); 2982 __ add(str1_reg, limit_reg, str1_reg); 2983 __ add(str2_reg, limit_reg, str2_reg); 2984 __ neg(limit_reg); //negate count 2985 2986 __ lduh(str1_reg, limit_reg, chr1_reg); 2987 // Lchar_loop 2988 __ bind(Lchar_loop); 2989 __ lduh(str2_reg, limit_reg, chr2_reg); 2990 __ cmp(chr1_reg, chr2_reg); 2991 __ br(Assembler::notEqual, true, Assembler::pt, Ldone); 2992 __ delayed()->mov(G0, result_reg); //not equal 2993 __ inccc(limit_reg, sizeof(jchar)); 2994 // annul LDUH if branch is not taken to prevent access past end of string 2995 __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop); 2996 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted 2997 2998 __ add(G0, 1, result_reg); //equal 2999 3000 __ bind(Ldone); 3001 %} 3002 3003 enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{ 3004 Label Lvector, Ldone, Lloop; 3005 MacroAssembler _masm(&cbuf); 3006 3007 Register ary1_reg = reg_to_register_object($ary1$$reg); 3008 Register ary2_reg = reg_to_register_object($ary2$$reg); 3009 Register tmp1_reg = reg_to_register_object($tmp1$$reg); 3010 Register tmp2_reg = O7; 3011 Register result_reg = reg_to_register_object($result$$reg); 3012 3013 int length_offset = arrayOopDesc::length_offset_in_bytes(); 3014 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); 3015 3016 // return true if the same array 3017 __ cmp(ary1_reg, ary2_reg); 3018 __ brx(Assembler::equal, true, Assembler::pn, Ldone); 3019 __ delayed()->add(G0, 1, result_reg); // equal 3020 3021 __ br_null(ary1_reg, 
true, Assembler::pn, Ldone); 3022 __ delayed()->mov(G0, result_reg); // not equal 3023 3024 __ br_null(ary2_reg, true, Assembler::pn, Ldone); 3025 __ delayed()->mov(G0, result_reg); // not equal 3026 3027 //load the lengths of arrays 3028 __ ld(Address(ary1_reg, length_offset), tmp1_reg); 3029 __ ld(Address(ary2_reg, length_offset), tmp2_reg); 3030 3031 // return false if the two arrays are not equal length 3032 __ cmp(tmp1_reg, tmp2_reg); 3033 __ br(Assembler::notEqual, true, Assembler::pn, Ldone); 3034 __ delayed()->mov(G0, result_reg); // not equal 3035 3036 __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn); 3037 __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal 3038 3039 // load array addresses 3040 __ add(ary1_reg, base_offset, ary1_reg); 3041 __ add(ary2_reg, base_offset, ary2_reg); 3042 3043 // renaming registers 3044 Register chr1_reg = result_reg; // for characters in ary1 3045 Register chr2_reg = tmp2_reg; // for characters in ary2 3046 Register limit_reg = tmp1_reg; // length 3047 3048 // set byte count 3049 __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); 3050 3051 // Compare char[] arrays aligned to 4 bytes. 
3052 __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg, 3053 chr1_reg, chr2_reg, Ldone); 3054 __ add(G0, 1, result_reg); // equals 3055 3056 __ bind(Ldone); 3057 %} 3058 3059 enc_class enc_rethrow() %{ 3060 cbuf.set_insts_mark(); 3061 Register temp_reg = G3; 3062 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub()); 3063 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg"); 3064 MacroAssembler _masm(&cbuf); 3065 #ifdef ASSERT 3066 __ save_frame(0); 3067 AddressLiteral last_rethrow_addrlit(&last_rethrow); 3068 __ sethi(last_rethrow_addrlit, L1); 3069 Address addr(L1, last_rethrow_addrlit.low10()); 3070 __ get_pc(L2); 3071 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to 3072 __ st_ptr(L2, addr); 3073 __ restore(); 3074 #endif 3075 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp 3076 __ delayed()->nop(); 3077 %} 3078 3079 enc_class emit_mem_nop() %{ 3080 // Generates the instruction LDUXA [o6,g0],#0x82,g0 3081 cbuf.insts()->emit_int32((unsigned int) 0xc0839040); 3082 %} 3083 3084 enc_class emit_fadd_nop() %{ 3085 // Generates the instruction FMOVS f31,f31 3086 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f); 3087 %} 3088 3089 enc_class emit_br_nop() %{ 3090 // Generates the instruction BPN,PN . 
3091 cbuf.insts()->emit_int32((unsigned int) 0x00400000); 3092 %} 3093 3094 enc_class enc_membar_acquire %{ 3095 MacroAssembler _masm(&cbuf); 3096 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) ); 3097 %} 3098 3099 enc_class enc_membar_release %{ 3100 MacroAssembler _masm(&cbuf); 3101 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) ); 3102 %} 3103 3104 enc_class enc_membar_volatile %{ 3105 MacroAssembler _masm(&cbuf); 3106 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); 3107 %} 3108 3109 %} 3110 3111 //----------FRAME-------------------------------------------------------------- 3112 // Definition of frame structure and management information. 3113 // 3114 // S T A C K L A Y O U T Allocators stack-slot number 3115 // | (to get allocators register number 3116 // G Owned by | | v add VMRegImpl::stack0) 3117 // r CALLER | | 3118 // o | +--------+ pad to even-align allocators stack-slot 3119 // w V | pad0 | numbers; owned by CALLER 3120 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned 3121 // h ^ | in | 5 3122 // | | args | 4 Holes in incoming args owned by SELF 3123 // | | | | 3 3124 // | | +--------+ 3125 // V | | old out| Empty on Intel, window on Sparc 3126 // | old |preserve| Must be even aligned. 3127 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned 3128 // | | in | 3 area for Intel ret address 3129 // Owned by |preserve| Empty on Sparc. 
//      SELF +--------+
//         | |  pad2  | 2 pad to align old SP
//         | +--------+ 1
//         | | locks  | 0
//         | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
//         | |  pad1  | 11 pad to align new SP
//         | +--------+
//         | |        | 10
//         | | spills |  9 spills
//         V |        |  8 (pad0 slot for callee)
//   -----------+--------+----> Matcher::_out_arg_limit, unaligned
//         ^ |  out   |  7
//         | |  args  |  6 Holes in outgoing args owned by CALLEE
//  Owned by +--------+
//    CALLEE | new out|  6 Empty on Intel, window on Sparc
//     | new |preserve| Must be even-aligned.
//     | SP-+--------+----> Matcher::_new_SP, even aligned
//     |     |        |
//
// Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
//         known from SELF's arguments and the Java calling convention.
//         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
// Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
//         even aligned with pad0 as needed.
//         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
//         region 6-11 is even aligned; it may be padded out more so that
//         the region from SP to FP meets the minimum stack alignment.

frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(R_G5);                // Inline Cache Register or Method* for I2C
  interpreter_method_oop_reg(R_G5);      // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  // Compiled code's Frame Pointer
  frame_pointer(R_SP);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes);
  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
  // !LP64: Alignment size in bytes (64-bit  ->  8 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  in_preserve_stack_slots(0);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  // ADLC doesn't support parsing expressions, so I folded the math by hand.
#ifdef _LP64
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
  varargs_C_out_slots_killed(12);
#else
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
  varargs_C_out_slots_killed( 7);
#endif

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  return_addr(REG R_I7);          // Ret Addr is in register I7

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // java
  calling_convention %{
    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);

  %}

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // C.
  c_calling_convention %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
  %}

  // Location of native (C/C++) and interpreter return values. This is specified to
  // be the same as Java. In the 32-bit VM, long values are actually returned from
  // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying
  // to and from the register pairs is done by the appropriate call and epilog
  // opcodes. This simplifies the register allocator.
  // The tables below are indexed by ideal register type (Op_RegI .. Op_RegL,
  // enforced by the assert); each entry gives the OptoReg for the hi/lo half.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
#else  // !_LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

  // Location of compiled Java return values.  Same as C
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
#else  // !_LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
#endif
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

%}


//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);           // Required size attribute (in bits)
ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
                                   // non-matching short branch variant of some
                                   // long branch?

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------
// Immediate Operands
// Integer Immediate: 32-bit
operand immI() %{
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 8-bit
operand immI8() %{
  predicate(Assembler::is_simm8(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit (fits the simm13 field of most SPARC instructions)
operand immI13() %{
  predicate(Assembler::is_simm13(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit minus 7
// (value + 7 must still fit in simm13)
operand immI13m7() %{
  predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit
operand immI16() %{
  predicate(Assembler::is_simm16(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned (positive) Integer Immediate: 13-bit
operand immU13() %{
  predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 6-bit (unsigned, 0..63)
operand immU6() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 11-bit
operand immI11() %{
  predicate(Assembler::is_simm11(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 5-bit
operand immI5() %{
  predicate(Assembler::is_simm5(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 0-bit (the constant zero)
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 10
operand immI10() %{
  predicate(n->get_int() == 10);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 0-31
operand immU5() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 1-31
operand immI_1_31() %{
  predicate(n->get_int() >= 1 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 32-63
operand immI_32_63() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Integer Immediate: the value 16
operand immI_16() %{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 24
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 255
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 65535
operand immI_65535() %{
  predicate(n->get_int() == 65535);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FF
operand immL_FF() %{
  predicate( n->get_long() == 0xFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FFFF
operand immL_FFFF() %{
  predicate( n->get_long() == 0xFFFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 32 or 64-bit
operand immP() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

#ifdef _LP64
// Pointer Immediate: 64-bit
// Pre-Niagara-plus processors: always materialize with a 'set' sequence.
operand immP_set() %{
  predicate(!VM_Version::is_niagara_plus());
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// From Niagara2 processors on a load should be better than materializing.
operand immP_load() %{
  predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Non-oop pointer that is cheap to materialize (<= 3 instructions).
operand immP_no_oop_cheap() %{
  predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
#endif

// Pointer Immediate: 13-bit (fits a simm13 field)
operand immP13() %{
  predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: NULL
operand immP0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: address of the safepoint polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);

  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate (narrow oop constant)
operand immN()
%{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass constant
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate (narrow)
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: any 64-bit constant
operand immL() %{
  match(ConL);
  op_cost(40);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: zero
operand immL0() %{
  predicate(n->get_long() == 0L);
  match(ConL);
  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 5-bit
operand immL5() %{
  predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 13-bit
operand immL13() %{
  predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 13-bit minus 7
// (value + 7 must still fit in simm13)
operand immL13m7() %{
  predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate
operand immD() %{
  match(ConD);

  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: positive zero only
operand immD0() %{
#ifdef _LP64
  // on 64-bit architectures this comparison is faster
  predicate(jlong_cast(n->getd()) == 0);
#else
  predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
#endif
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: 0 (positive zero only)
operand immF0() %{
  predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Register Operands
// Integer Register
operand iRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  match(notemp_iRegI);
  match(g1RegI);
  match(o0RegI);
  match(iRegIsafe);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the temp registers (notemp_int_reg class)
operand notemp_iRegI() %{
  constraint(ALLOC_IN_RC(notemp_int_reg));
  match(RegI);

  match(o0RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register pinned to O0
operand o0RegI() %{
  constraint(ALLOC_IN_RC(o0_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register
operand iRegP() %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);

  match(lock_ptr_RegP);
  match(g1RegP);
  match(g2RegP);
  match(g3RegP);
  match(g4RegP);
  match(i0RegP);
  match(o0RegP);
  match(o1RegP);
  match(l7RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register class that may include SP (sp_ptr_reg class)
operand sp_ptr_RegP() %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(RegP);
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register class usable for locks (lock_ptr_reg class)
operand lock_ptr_RegP() %{
  constraint(ALLOC_IN_RC(lock_ptr_reg));
  match(RegP);
  match(i0RegP);
  match(o0RegP);
  match(o1RegP);
  match(l7RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Single-register pointer operands, each pinned to one machine register.
operand g1RegP() %{
  constraint(ALLOC_IN_RC(g1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g2RegP() %{
  constraint(ALLOC_IN_RC(g2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegP() %{
  constraint(ALLOC_IN_RC(g3_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Single-register integer operands pinned to G1/G3/G4.
operand g1RegI() %{
  constraint(ALLOC_IN_RC(g1_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegI() %{
  constraint(ALLOC_IN_RC(g3_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g4RegI() %{
  constraint(ALLOC_IN_RC(g4_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g4RegP() %{
  constraint(ALLOC_IN_RC(g4_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand i0RegP() %{
  constraint(ALLOC_IN_RC(i0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o0RegP() %{
  constraint(ALLOC_IN_RC(o0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o1RegP() %{
  constraint(ALLOC_IN_RC(o1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o2RegP() %{
  constraint(ALLOC_IN_RC(o2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegP() %{
  constraint(ALLOC_IN_RC(o7_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand l7RegP() %{
  constraint(ALLOC_IN_RC(l7_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegI()
%{
  constraint(ALLOC_IN_RC(o7_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Narrow-oop register (same allocation class as integer registers)
operand iRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Long Register
operand iRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

// Single-register long operands pinned to O2/O7/G1/G3.
operand o2RegL() %{
  constraint(ALLOC_IN_RC(o2_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegL() %{
  constraint(ALLOC_IN_RC(o7_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand g1RegL() %{
  constraint(ALLOC_IN_RC(g1_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegL() %{
  constraint(ALLOC_IN_RC(g3_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Int Register safe
// This is 64bit safe
operand iRegIsafe() %{
  constraint(ALLOC_IN_RC(long_reg));

  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Condition Code Flag Register
operand flagsReg() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "ccr" %} // both ICC and XCC
  interface(REG_INTER);
%}

// Condition Code Register, unsigned comparisons.
operand flagsRegU() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "icc_U" %}
  interface(REG_INTER);
%}

// Condition Code Register, pointer comparisons.
operand flagsRegP() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

#ifdef _LP64
  format %{ "xcc_P" %}
#else
  format %{ "icc_P" %}
#endif
  interface(REG_INTER);
%}

// Condition Code Register, long comparisons.
operand flagsRegL() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "xcc_L" %}
  interface(REG_INTER);
%}

// Condition Code Register, floating comparisons, unordered same as "less".
operand flagsRegF() %{
  constraint(ALLOC_IN_RC(float_flags));
  match(RegFlags);
  match(flagsRegF0);

  format %{ %}
  interface(REG_INTER);
%}

// Floating condition code register restricted to the float_flag0 class.
operand flagsRegF0() %{
  constraint(ALLOC_IN_RC(float_flag0));
  match(RegFlags);

  format %{ %}
  interface(REG_INTER);
%}


// Condition Code Flag Register used by long compare
operand flagsReg_long_LTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_LTGE" %}
  interface(REG_INTER);
%}
operand flagsReg_long_EQNE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_EQNE" %}
  interface(REG_INTER);
%}
operand flagsReg_long_LEGT() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_LEGT" %}
  interface(REG_INTER);
%}


// Double-precision float register
operand regD() %{
  constraint(ALLOC_IN_RC(dflt_reg));
  match(RegD);

  match(regD_low);

  format %{ %}
  interface(REG_INTER);
%}

// Single-precision float register
operand regF() %{
  constraint(ALLOC_IN_RC(sflt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register restricted to the dflt_low_reg class.
operand regD_low() %{
  constraint(ALLOC_IN_RC(dflt_low_reg));
  match(regD);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}


//----------Complex Operands---------------------------------------------------
// Indirect Memory Reference
operand indirect(sp_ptr_RegP reg) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(reg);

  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect with simm13 Offset
operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with simm13 Offset minus 7
operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Note: Intel has a swapped version also, like this:
//operand indOffsetX(iRegI reg, immP offset) %{
//  constraint(ALLOC_IN_RC(int_reg));
//  match(AddP offset reg);
//
//  op_cost(100);
//  format %{ "[$reg + $offset]" %}
//  interface(MEMORY_INTER) %{
//    base($reg);
//    index(0x0);
//    scale(0x0);
//    disp($offset);
//  %}
//%}
//// However, it doesn't make sense for SPARC, since
// we have no particularly good way to embed oops in
// single instructions.

// Indirect with Register Index
operand indIndex(iRegP addr, iRegX index) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr index);

  op_cost(100);
  format %{ "[$addr + $index]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Op, signed.
// Each COND_INTER entry maps an ideal Bool condition to the encoding
// emitted into the branch instruction's condition field.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x3);
    greater_equal(0xB);
    less_equal(0x2);
    greater(0xA);
  %}
%}

// Comparison Op, unsigned
operand cmpOpU() %{
  match(Bool);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
  %}
%}

// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
  match(Bool);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
  %}
%}

// Comparison Op, branch-register encoding
// NOTE(review): encodings here are for the branch-on-register-value forms,
// which use a different condition field than the cc-based operands above.
operand cmpOp_reg() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x1);
    not_equal    (0x5);
    less         (0x3);
    greater_equal(0x7);
    less_equal   (0x2);
    greater      (0x6);
  %}
%}

// Comparison Code, floating, unordered same as less
operand cmpOpF() %{
  match(Bool);

  format %{ "fl" %}
  interface(COND_INTER) %{
    equal(0x9);
    not_equal(0x1);
    less(0x3);
    greater_equal(0xB);
    less_equal(0xE);
    greater(0x6);
  %}
%}

// Used by long compare.
// Same encodings as cmpOp but with the ordering conditions commuted
// (less <-> greater, less_equal <-> greater_equal) for swapped operands.
operand cmpOp_commute() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0xA);
    greater_equal(0x2);
    less_equal(0xB);
    greater(0x3);
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format. The classic
// case of this is memory operands.
opclass memory( indirect, indOffset13, indIndex );
opclass indIndexMemory( indIndex );

//----------PIPELINE-----------------------------------------------------------
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;           // Fixed size instructions
  branch_has_delay_slot;             // Branch has delay slot following
  max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine.
// IALU is a pseudo-resource: an instruction needing IALU may issue on
// either integer ALU pipe (A0 or A1).
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline
pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg long operation (two 32-bit halves, hence two IALU uses)
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
    instruction_count(2);
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
    IALU  : R;
%}

// Integer ALU reg-reg long dependent operation
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    cr    : E(write);
    IALU  : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code
pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation (the immI0 operand needs no register read)
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
    single_instruction;
    cr    : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code only
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{ 4444 single_instruction; 4445 cr : E(write); 4446 src1 : R(read); 4447 src2 : R(read); 4448 IALU : R; 4449 %} 4450 4451 // Integer ALU reg-imm operation with condition code only 4452 pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{ 4453 single_instruction; 4454 cr : E(write); 4455 src1 : R(read); 4456 IALU : R; 4457 %} 4458 4459 // Integer ALU reg-reg-zero operation with condition code only 4460 pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{ 4461 single_instruction; 4462 cr : E(write); 4463 src1 : R(read); 4464 src2 : R(read); 4465 IALU : R; 4466 %} 4467 4468 // Integer ALU reg-imm-zero operation with condition code only 4469 pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{ 4470 single_instruction; 4471 cr : E(write); 4472 src1 : R(read); 4473 IALU : R; 4474 %} 4475 4476 // Integer ALU reg-reg operation with condition code, src1 modified 4477 pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{ 4478 single_instruction; 4479 cr : E(write); 4480 src1 : E(write); 4481 src1 : R(read); 4482 src2 : R(read); 4483 IALU : R; 4484 %} 4485 4486 // Integer ALU reg-imm operation with condition code, src1 modified 4487 pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{ 4488 single_instruction; 4489 cr : E(write); 4490 src1 : E(write); 4491 src1 : R(read); 4492 IALU : R; 4493 %} 4494 4495 pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{ 4496 multiple_bundles; 4497 dst : E(write)+4; 4498 cr : E(write); 4499 src1 : R(read); 4500 src2 : R(read); 4501 IALU : R(3); 4502 BR : R(2); 4503 %} 4504 4505 // Integer ALU operation 4506 pipe_class ialu_none(iRegI dst) %{ 4507 single_instruction; 4508 dst : E(write); 4509 IALU : R; 4510 %} 4511 4512 // Integer ALU reg operation 4513 pipe_class ialu_reg(iRegI dst, iRegI src) %{ 4514 single_instruction; 
may_have_no_code; 4515 dst : E(write); 4516 src : R(read); 4517 IALU : R; 4518 %} 4519 4520 // Integer ALU reg conditional operation 4521 // This instruction has a 1 cycle stall, and cannot execute 4522 // in the same cycle as the instruction setting the condition 4523 // code. We kludge this by pretending to read the condition code 4524 // 1 cycle earlier, and by marking the functional units as busy 4525 // for 2 cycles with the result available 1 cycle later than 4526 // is really the case. 4527 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{ 4528 single_instruction; 4529 op2_out : C(write); 4530 op1 : R(read); 4531 cr : R(read); // This is really E, with a 1 cycle stall 4532 BR : R(2); 4533 MS : R(2); 4534 %} 4535 4536 #ifdef _LP64 4537 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{ 4538 instruction_count(1); multiple_bundles; 4539 dst : C(write)+1; 4540 src : R(read)+1; 4541 IALU : R(1); 4542 BR : E(2); 4543 MS : E(2); 4544 %} 4545 #endif 4546 4547 // Integer ALU reg operation 4548 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{ 4549 single_instruction; may_have_no_code; 4550 dst : E(write); 4551 src : R(read); 4552 IALU : R; 4553 %} 4554 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{ 4555 single_instruction; may_have_no_code; 4556 dst : E(write); 4557 src : R(read); 4558 IALU : R; 4559 %} 4560 4561 // Two integer ALU reg operations 4562 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{ 4563 instruction_count(2); 4564 dst : E(write); 4565 src : R(read); 4566 A0 : R; 4567 A1 : R; 4568 %} 4569 4570 // Two integer ALU reg operations 4571 pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{ 4572 instruction_count(2); may_have_no_code; 4573 dst : E(write); 4574 src : R(read); 4575 A0 : R; 4576 A1 : R; 4577 %} 4578 4579 // Integer ALU imm operation 4580 pipe_class ialu_imm(iRegI dst, immI13 src) %{ 4581 single_instruction; 4582 dst : E(write); 4583 IALU : R; 4584 %} 4585 4586 // Integer ALU reg-reg 
with carry operation 4587 pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{ 4588 single_instruction; 4589 dst : E(write); 4590 src1 : R(read); 4591 src2 : R(read); 4592 IALU : R; 4593 %} 4594 4595 // Integer ALU cc operation 4596 pipe_class ialu_cc(iRegI dst, flagsReg cc) %{ 4597 single_instruction; 4598 dst : E(write); 4599 cc : R(read); 4600 IALU : R; 4601 %} 4602 4603 // Integer ALU cc / second IALU operation 4604 pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{ 4605 instruction_count(1); multiple_bundles; 4606 dst : E(write)+1; 4607 src : R(read); 4608 IALU : R; 4609 %} 4610 4611 // Integer ALU cc / second IALU operation 4612 pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{ 4613 instruction_count(1); multiple_bundles; 4614 dst : E(write)+1; 4615 p : R(read); 4616 q : R(read); 4617 IALU : R; 4618 %} 4619 4620 // Integer ALU hi-lo-reg operation 4621 pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{ 4622 instruction_count(1); multiple_bundles; 4623 dst : E(write)+1; 4624 IALU : R(2); 4625 %} 4626 4627 // Float ALU hi-lo-reg operation (with temp) 4628 pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{ 4629 instruction_count(1); multiple_bundles; 4630 dst : E(write)+1; 4631 IALU : R(2); 4632 %} 4633 4634 // Long Constant 4635 pipe_class loadConL( iRegL dst, immL src ) %{ 4636 instruction_count(2); multiple_bundles; 4637 dst : E(write)+1; 4638 IALU : R(2); 4639 IALU : R(2); 4640 %} 4641 4642 // Pointer Constant 4643 pipe_class loadConP( iRegP dst, immP src ) %{ 4644 instruction_count(0); multiple_bundles; 4645 fixed_latency(6); 4646 %} 4647 4648 // Polling Address 4649 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{ 4650 #ifdef _LP64 4651 instruction_count(0); multiple_bundles; 4652 fixed_latency(6); 4653 #else 4654 dst : E(write); 4655 IALU : R; 4656 #endif 4657 %} 4658 4659 // Long Constant small 4660 pipe_class loadConLlo( iRegL dst, immL src ) %{ 4661 instruction_count(2); 4662 dst : E(write); 4663 
IALU : R; 4664 IALU : R; 4665 %} 4666 4667 // [PHH] This is wrong for 64-bit. See LdImmF/D. 4668 pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{ 4669 instruction_count(1); multiple_bundles; 4670 src : R(read); 4671 dst : M(write)+1; 4672 IALU : R; 4673 MS : E; 4674 %} 4675 4676 // Integer ALU nop operation 4677 pipe_class ialu_nop() %{ 4678 single_instruction; 4679 IALU : R; 4680 %} 4681 4682 // Integer ALU nop operation 4683 pipe_class ialu_nop_A0() %{ 4684 single_instruction; 4685 A0 : R; 4686 %} 4687 4688 // Integer ALU nop operation 4689 pipe_class ialu_nop_A1() %{ 4690 single_instruction; 4691 A1 : R; 4692 %} 4693 4694 // Integer Multiply reg-reg operation 4695 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 4696 single_instruction; 4697 dst : E(write); 4698 src1 : R(read); 4699 src2 : R(read); 4700 MS : R(5); 4701 %} 4702 4703 // Integer Multiply reg-imm operation 4704 pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{ 4705 single_instruction; 4706 dst : E(write); 4707 src1 : R(read); 4708 MS : R(5); 4709 %} 4710 4711 pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 4712 single_instruction; 4713 dst : E(write)+4; 4714 src1 : R(read); 4715 src2 : R(read); 4716 MS : R(6); 4717 %} 4718 4719 pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{ 4720 single_instruction; 4721 dst : E(write)+4; 4722 src1 : R(read); 4723 MS : R(6); 4724 %} 4725 4726 // Integer Divide reg-reg 4727 pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{ 4728 instruction_count(1); multiple_bundles; 4729 dst : E(write); 4730 temp : E(write); 4731 src1 : R(read); 4732 src2 : R(read); 4733 temp : R(read); 4734 MS : R(38); 4735 %} 4736 4737 // Integer Divide reg-imm 4738 pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{ 4739 instruction_count(1); multiple_bundles; 4740 dst : E(write); 4741 temp : E(write); 4742 src1 : R(read); 4743 temp : R(read); 4744 MS : R(38); 
4745 %} 4746 4747 // Long Divide 4748 pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 4749 dst : E(write)+71; 4750 src1 : R(read); 4751 src2 : R(read)+1; 4752 MS : R(70); 4753 %} 4754 4755 pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{ 4756 dst : E(write)+71; 4757 src1 : R(read); 4758 MS : R(70); 4759 %} 4760 4761 // Floating Point Add Float 4762 pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{ 4763 single_instruction; 4764 dst : X(write); 4765 src1 : E(read); 4766 src2 : E(read); 4767 FA : R; 4768 %} 4769 4770 // Floating Point Add Double 4771 pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{ 4772 single_instruction; 4773 dst : X(write); 4774 src1 : E(read); 4775 src2 : E(read); 4776 FA : R; 4777 %} 4778 4779 // Floating Point Conditional Move based on integer flags 4780 pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{ 4781 single_instruction; 4782 dst : X(write); 4783 src : E(read); 4784 cr : R(read); 4785 FA : R(2); 4786 BR : R(2); 4787 %} 4788 4789 // Floating Point Conditional Move based on integer flags 4790 pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{ 4791 single_instruction; 4792 dst : X(write); 4793 src : E(read); 4794 cr : R(read); 4795 FA : R(2); 4796 BR : R(2); 4797 %} 4798 4799 // Floating Point Multiply Float 4800 pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{ 4801 single_instruction; 4802 dst : X(write); 4803 src1 : E(read); 4804 src2 : E(read); 4805 FM : R; 4806 %} 4807 4808 // Floating Point Multiply Double 4809 pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{ 4810 single_instruction; 4811 dst : X(write); 4812 src1 : E(read); 4813 src2 : E(read); 4814 FM : R; 4815 %} 4816 4817 // Floating Point Divide Float 4818 pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{ 4819 single_instruction; 4820 dst : X(write); 4821 src1 : E(read); 4822 src2 : E(read); 4823 FM : R; 4824 FDIV : C(14); 4825 %} 
4826 4827 // Floating Point Divide Double 4828 pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{ 4829 single_instruction; 4830 dst : X(write); 4831 src1 : E(read); 4832 src2 : E(read); 4833 FM : R; 4834 FDIV : C(17); 4835 %} 4836 4837 // Floating Point Move/Negate/Abs Float 4838 pipe_class faddF_reg(regF dst, regF src) %{ 4839 single_instruction; 4840 dst : W(write); 4841 src : E(read); 4842 FA : R(1); 4843 %} 4844 4845 // Floating Point Move/Negate/Abs Double 4846 pipe_class faddD_reg(regD dst, regD src) %{ 4847 single_instruction; 4848 dst : W(write); 4849 src : E(read); 4850 FA : R; 4851 %} 4852 4853 // Floating Point Convert F->D 4854 pipe_class fcvtF2D(regD dst, regF src) %{ 4855 single_instruction; 4856 dst : X(write); 4857 src : E(read); 4858 FA : R; 4859 %} 4860 4861 // Floating Point Convert I->D 4862 pipe_class fcvtI2D(regD dst, regF src) %{ 4863 single_instruction; 4864 dst : X(write); 4865 src : E(read); 4866 FA : R; 4867 %} 4868 4869 // Floating Point Convert LHi->D 4870 pipe_class fcvtLHi2D(regD dst, regD src) %{ 4871 single_instruction; 4872 dst : X(write); 4873 src : E(read); 4874 FA : R; 4875 %} 4876 4877 // Floating Point Convert L->D 4878 pipe_class fcvtL2D(regD dst, regF src) %{ 4879 single_instruction; 4880 dst : X(write); 4881 src : E(read); 4882 FA : R; 4883 %} 4884 4885 // Floating Point Convert L->F 4886 pipe_class fcvtL2F(regD dst, regF src) %{ 4887 single_instruction; 4888 dst : X(write); 4889 src : E(read); 4890 FA : R; 4891 %} 4892 4893 // Floating Point Convert D->F 4894 pipe_class fcvtD2F(regD dst, regF src) %{ 4895 single_instruction; 4896 dst : X(write); 4897 src : E(read); 4898 FA : R; 4899 %} 4900 4901 // Floating Point Convert I->L 4902 pipe_class fcvtI2L(regD dst, regF src) %{ 4903 single_instruction; 4904 dst : X(write); 4905 src : E(read); 4906 FA : R; 4907 %} 4908 4909 // Floating Point Convert D->F 4910 pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{ 4911 instruction_count(1); multiple_bundles; 4912 dst : 
X(write)+6; 4913 src : E(read); 4914 FA : R; 4915 %} 4916 4917 // Floating Point Convert D->L 4918 pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{ 4919 instruction_count(1); multiple_bundles; 4920 dst : X(write)+6; 4921 src : E(read); 4922 FA : R; 4923 %} 4924 4925 // Floating Point Convert F->I 4926 pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{ 4927 instruction_count(1); multiple_bundles; 4928 dst : X(write)+6; 4929 src : E(read); 4930 FA : R; 4931 %} 4932 4933 // Floating Point Convert F->L 4934 pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{ 4935 instruction_count(1); multiple_bundles; 4936 dst : X(write)+6; 4937 src : E(read); 4938 FA : R; 4939 %} 4940 4941 // Floating Point Convert I->F 4942 pipe_class fcvtI2F(regF dst, regF src) %{ 4943 single_instruction; 4944 dst : X(write); 4945 src : E(read); 4946 FA : R; 4947 %} 4948 4949 // Floating Point Compare 4950 pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{ 4951 single_instruction; 4952 cr : X(write); 4953 src1 : E(read); 4954 src2 : E(read); 4955 FA : R; 4956 %} 4957 4958 // Floating Point Compare 4959 pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{ 4960 single_instruction; 4961 cr : X(write); 4962 src1 : E(read); 4963 src2 : E(read); 4964 FA : R; 4965 %} 4966 4967 // Floating Add Nop 4968 pipe_class fadd_nop() %{ 4969 single_instruction; 4970 FA : R; 4971 %} 4972 4973 // Integer Store to Memory 4974 pipe_class istore_mem_reg(memory mem, iRegI src) %{ 4975 single_instruction; 4976 mem : R(read); 4977 src : C(read); 4978 MS : R; 4979 %} 4980 4981 // Integer Store to Memory 4982 pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{ 4983 single_instruction; 4984 mem : R(read); 4985 src : C(read); 4986 MS : R; 4987 %} 4988 4989 // Integer Store Zero to Memory 4990 pipe_class istore_mem_zero(memory mem, immI0 src) %{ 4991 single_instruction; 4992 mem : R(read); 4993 MS : R; 4994 %} 4995 4996 // Special Stack Slot 
Store 4997 pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{ 4998 single_instruction; 4999 stkSlot : R(read); 5000 src : C(read); 5001 MS : R; 5002 %} 5003 5004 // Special Stack Slot Store 5005 pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{ 5006 instruction_count(2); multiple_bundles; 5007 stkSlot : R(read); 5008 src : C(read); 5009 MS : R(2); 5010 %} 5011 5012 // Float Store 5013 pipe_class fstoreF_mem_reg(memory mem, RegF src) %{ 5014 single_instruction; 5015 mem : R(read); 5016 src : C(read); 5017 MS : R; 5018 %} 5019 5020 // Float Store 5021 pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{ 5022 single_instruction; 5023 mem : R(read); 5024 MS : R; 5025 %} 5026 5027 // Double Store 5028 pipe_class fstoreD_mem_reg(memory mem, RegD src) %{ 5029 instruction_count(1); 5030 mem : R(read); 5031 src : C(read); 5032 MS : R; 5033 %} 5034 5035 // Double Store 5036 pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{ 5037 single_instruction; 5038 mem : R(read); 5039 MS : R; 5040 %} 5041 5042 // Special Stack Slot Float Store 5043 pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{ 5044 single_instruction; 5045 stkSlot : R(read); 5046 src : C(read); 5047 MS : R; 5048 %} 5049 5050 // Special Stack Slot Double Store 5051 pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{ 5052 single_instruction; 5053 stkSlot : R(read); 5054 src : C(read); 5055 MS : R; 5056 %} 5057 5058 // Integer Load (when sign bit propagation not needed) 5059 pipe_class iload_mem(iRegI dst, memory mem) %{ 5060 single_instruction; 5061 mem : R(read); 5062 dst : C(write); 5063 MS : R; 5064 %} 5065 5066 // Integer Load from stack operand 5067 pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{ 5068 single_instruction; 5069 mem : R(read); 5070 dst : C(write); 5071 MS : R; 5072 %} 5073 5074 // Integer Load (when sign bit propagation or masking is needed) 5075 pipe_class iload_mask_mem(iRegI dst, memory mem) %{ 5076 single_instruction; 5077 mem : R(read); 5078 dst : 
M(write); 5079 MS : R; 5080 %} 5081 5082 // Float Load 5083 pipe_class floadF_mem(regF dst, memory mem) %{ 5084 single_instruction; 5085 mem : R(read); 5086 dst : M(write); 5087 MS : R; 5088 %} 5089 5090 // Float Load 5091 pipe_class floadD_mem(regD dst, memory mem) %{ 5092 instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case 5093 mem : R(read); 5094 dst : M(write); 5095 MS : R; 5096 %} 5097 5098 // Float Load 5099 pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{ 5100 single_instruction; 5101 stkSlot : R(read); 5102 dst : M(write); 5103 MS : R; 5104 %} 5105 5106 // Float Load 5107 pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{ 5108 single_instruction; 5109 stkSlot : R(read); 5110 dst : M(write); 5111 MS : R; 5112 %} 5113 5114 // Memory Nop 5115 pipe_class mem_nop() %{ 5116 single_instruction; 5117 MS : R; 5118 %} 5119 5120 pipe_class sethi(iRegP dst, immI src) %{ 5121 single_instruction; 5122 dst : E(write); 5123 IALU : R; 5124 %} 5125 5126 pipe_class loadPollP(iRegP poll) %{ 5127 single_instruction; 5128 poll : R(read); 5129 MS : R; 5130 %} 5131 5132 pipe_class br(Universe br, label labl) %{ 5133 single_instruction_with_delay_slot; 5134 BR : R; 5135 %} 5136 5137 pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{ 5138 single_instruction_with_delay_slot; 5139 cr : E(read); 5140 BR : R; 5141 %} 5142 5143 pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{ 5144 single_instruction_with_delay_slot; 5145 op1 : E(read); 5146 BR : R; 5147 MS : R; 5148 %} 5149 5150 // Compare and branch 5151 pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{ 5152 instruction_count(2); has_delay_slot; 5153 cr : E(write); 5154 src1 : R(read); 5155 src2 : R(read); 5156 IALU : R; 5157 BR : R; 5158 %} 5159 5160 // Compare and branch 5161 pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{ 5162 
instruction_count(2); has_delay_slot; 5163 cr : E(write); 5164 src1 : R(read); 5165 IALU : R; 5166 BR : R; 5167 %} 5168 5169 // Compare and branch using cbcond 5170 pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{ 5171 single_instruction; 5172 src1 : E(read); 5173 src2 : E(read); 5174 IALU : R; 5175 BR : R; 5176 %} 5177 5178 // Compare and branch using cbcond 5179 pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{ 5180 single_instruction; 5181 src1 : E(read); 5182 IALU : R; 5183 BR : R; 5184 %} 5185 5186 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{ 5187 single_instruction_with_delay_slot; 5188 cr : E(read); 5189 BR : R; 5190 %} 5191 5192 pipe_class br_nop() %{ 5193 single_instruction; 5194 BR : R; 5195 %} 5196 5197 pipe_class simple_call(method meth) %{ 5198 instruction_count(2); multiple_bundles; force_serialization; 5199 fixed_latency(100); 5200 BR : R(1); 5201 MS : R(1); 5202 A0 : R(1); 5203 %} 5204 5205 pipe_class compiled_call(method meth) %{ 5206 instruction_count(1); multiple_bundles; force_serialization; 5207 fixed_latency(100); 5208 MS : R(1); 5209 %} 5210 5211 pipe_class call(method meth) %{ 5212 instruction_count(0); multiple_bundles; force_serialization; 5213 fixed_latency(100); 5214 %} 5215 5216 pipe_class tail_call(Universe ignore, label labl) %{ 5217 single_instruction; has_delay_slot; 5218 fixed_latency(100); 5219 BR : R(1); 5220 MS : R(1); 5221 %} 5222 5223 pipe_class ret(Universe ignore) %{ 5224 single_instruction; has_delay_slot; 5225 BR : R(1); 5226 MS : R(1); 5227 %} 5228 5229 pipe_class ret_poll(g3RegP poll) %{ 5230 instruction_count(3); has_delay_slot; 5231 poll : E(read); 5232 MS : R; 5233 %} 5234 5235 // The real do-nothing guy 5236 pipe_class empty( ) %{ 5237 instruction_count(0); 5238 %} 5239 5240 pipe_class long_memory_op() %{ 5241 instruction_count(0); multiple_bundles; force_serialization; 5242 fixed_latency(25); 5243 MS : R(1); 5244 
%}

// Check-cast
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
    array : R(read);
    match : R(read);
    IALU  : R(2);
    BR    : R(2);
    MS    : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
    src1  : E(read);
    src2  : E(read);
    dst   : E(write);
    FA    : R;
    MS    : R(2);
    BR    : R(2);
%}

// Compare for p < q, and conditionally add y
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
    p     : E(read);
    q     : E(read);
    y     : E(read);
    IALU  : R(3);  // terminating ';' added for consistency with all other pipe_class entries
%}

// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
    src2   : E(read);
    srcdst : E(read);
    IALU   : R;
    BR     : R;
%}

// Define the class for the Nop node
define %{
   MachNop = ialu_nop;
%}

%}

//----------INSTRUCTIONS-------------------------------------------------------

//------------Special Stack Slot instructions - no match rules-----------------
// Load a float from an integer stack slot.
instruct stkI_to_regF(regF dst, stackSlotI src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDF $src,$dst\t! stkI to regF" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadF_stk);
%}

// Load a double from a long stack slot.
instruct stkL_to_regD(regD dst, stackSlotL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $src,$dst\t! stkL to regD" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadD_stk);
%}

// Store a float into an integer stack slot.
instruct regF_to_stkI(stackSlotI dst, regF src) %{
  // No match rule to avoid chain rule match.
5315 effect(DEF dst, USE src); 5316 ins_cost(MEMORY_REF_COST); 5317 size(4); 5318 format %{ "STF $src,$dst\t! regF to stkI" %} 5319 opcode(Assembler::stf_op3); 5320 ins_encode(simple_form3_mem_reg(dst, src)); 5321 ins_pipe(fstoreF_stk_reg); 5322 %} 5323 5324 instruct regD_to_stkL(stackSlotL dst, regD src) %{ 5325 // No match rule to avoid chain rule match. 5326 effect(DEF dst, USE src); 5327 ins_cost(MEMORY_REF_COST); 5328 size(4); 5329 format %{ "STDF $src,$dst\t! regD to stkL" %} 5330 opcode(Assembler::stdf_op3); 5331 ins_encode(simple_form3_mem_reg(dst, src)); 5332 ins_pipe(fstoreD_stk_reg); 5333 %} 5334 5335 instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{ 5336 effect(DEF dst, USE src); 5337 ins_cost(MEMORY_REF_COST*2); 5338 size(8); 5339 format %{ "STW $src,$dst.hi\t! long\n\t" 5340 "STW R_G0,$dst.lo" %} 5341 opcode(Assembler::stw_op3); 5342 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0)); 5343 ins_pipe(lstoreI_stk_reg); 5344 %} 5345 5346 instruct regL_to_stkD(stackSlotD dst, iRegL src) %{ 5347 // No match rule to avoid chain rule match. 5348 effect(DEF dst, USE src); 5349 ins_cost(MEMORY_REF_COST); 5350 size(4); 5351 format %{ "STX $src,$dst\t! 
regL to stkD" %} 5352 opcode(Assembler::stx_op3); 5353 ins_encode(simple_form3_mem_reg( dst, src ) ); 5354 ins_pipe(istore_stk_reg); 5355 %} 5356 5357 //---------- Chain stack slots between similar types -------- 5358 5359 // Load integer from stack slot 5360 instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{ 5361 match(Set dst src); 5362 ins_cost(MEMORY_REF_COST); 5363 5364 size(4); 5365 format %{ "LDUW $src,$dst\t!stk" %} 5366 opcode(Assembler::lduw_op3); 5367 ins_encode(simple_form3_mem_reg( src, dst ) ); 5368 ins_pipe(iload_mem); 5369 %} 5370 5371 // Store integer to stack slot 5372 instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{ 5373 match(Set dst src); 5374 ins_cost(MEMORY_REF_COST); 5375 5376 size(4); 5377 format %{ "STW $src,$dst\t!stk" %} 5378 opcode(Assembler::stw_op3); 5379 ins_encode(simple_form3_mem_reg( dst, src ) ); 5380 ins_pipe(istore_mem_reg); 5381 %} 5382 5383 // Load long from stack slot 5384 instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{ 5385 match(Set dst src); 5386 5387 ins_cost(MEMORY_REF_COST); 5388 size(4); 5389 format %{ "LDX $src,$dst\t! long" %} 5390 opcode(Assembler::ldx_op3); 5391 ins_encode(simple_form3_mem_reg( src, dst ) ); 5392 ins_pipe(iload_mem); 5393 %} 5394 5395 // Store long to stack slot 5396 instruct regL_to_stkL(stackSlotL dst, iRegL src) %{ 5397 match(Set dst src); 5398 5399 ins_cost(MEMORY_REF_COST); 5400 size(4); 5401 format %{ "STX $src,$dst\t! 
long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

#ifdef _LP64
// Spill-code copies between stack slots and pointer registers,
// used by the register allocator; single 64-bit LDX/STX each.
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
// NOTE(review): unlike the 64-bit arm above, no size() is declared here.
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDUW $src,$dst\t!ptr" %}
  opcode(Assembler::lduw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STW $src,$dst\t!ptr" %}
  opcode(Assembler::stw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#endif // _LP64

//------------Special Nop instructions for bundling - no match rules-----------
// These have no match rule: the instruction scheduler materializes them
// directly to fill a particular functional unit's slot in a bundle.
// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_cost(0);

  format %{ "NOP ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_cost(0);

  format %{ "NOP ! Memory Pipeline" %}
  ins_encode( emit_mem_nop );
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_cost(0);

  format %{ "NOP ! Floating Add Pipeline" %}
  ins_encode( emit_fadd_nop );
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_cost(0);

  format %{ "NOP ! Branch Pipeline" %}
  ins_encode( emit_br_nop );
  ins_pipe(br_nop);
%}

//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem,$dst\t! byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
instruct loadB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem,$dst\t! byte -> long" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg
instruct loadUB(iRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem,$dst\t! ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
instruct loadUB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);  // two fixed 4-byte instructions: load then and
  format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
    __ and3($dst$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Short (16bit signed)
instruct loadS(iRegI dst, memory mem) %{
  match(Set dst (LoadS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH $mem,$dst\t! short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
// Folds (short << 24) >> 24 into one sign-extending byte load: on this
// big-endian target the low byte of the short is at offset +1.
instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB $mem+1,$dst\t! short -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16bit signed) into a Long Register
instruct loadS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH $mem,$dst\t! short -> long" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned)
instruct loadUS(iRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH $mem,$dst\t! ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
instruct loadUS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 1);  // LSB is index+1 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);  // mask fits in a simm13, so a single AND immediate suffices
  format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduh($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register
instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(TEMP dst, TEMP tmp);  // mask is too wide for an immediate; needs tmp
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  size((3+1)*4);  // set may use two instructions.
  format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
            "SET $mask,$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduh($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer
instruct loadI(iRegI dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $mem,$dst\t! int" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer to Byte (8 bit signed)
// Big-endian: low byte of the int is at offset +3.
instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB $mem+3,$dst\t! int -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Byte (8 bit UNsigned)
instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Short (16 bit signed)
// Big-endian: low halfword of the int is at offset +2.
instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSH $mem+2,$dst\t! int -> short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Short (16 bit UNsigned)
instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer into a Long Register
instruct loadI2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSW $mem,$dst\t! int -> long" %}
  ins_encode %{
    __ ldsw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer with mask 0xFF into a Long Register
instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3);  // LSB is index+3 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with mask 0xFFFF into a Long Register
instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2);  // LSW is index+2 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 13-bit mask into a Long Register
instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduw($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 32-bit mask into a Long Register
instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  size((3+1)*4);  // set may use two instructions.
  format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t"
            "SET $mask,$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduw($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Integer into a Long Register
// The AndL with the 0xFFFFFFFF mask is absorbed by the zero-extending LDUW.
instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $mem,$dst\t! uint -> long" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned
instruct loadL(iRegL dst, memory mem ) %{
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDX $mem,$dst\t! long" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - UNaligned
// Two 32-bit loads marshalled into one 64-bit register; clobbers O7.
instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
  match(Set dst (LoadL_unaligned mem));
  effect(KILL tmp);
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(16);
  format %{ "LDUW $mem+4,R_O7\t! misaligned long\n"
            "\tLDUW $mem ,$dst\n"
            "\tSLLX #32, $dst, $dst\n"
            "\tOR $dst, R_O7, $dst" %}
  opcode(Assembler::lduw_op3);
  ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Range
instruct loadRange(iRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $mem,$dst\t! range" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(iload_mem);
%}

// Load Integer into %f register (for fitos/fitod)
instruct loadI_freg(regF dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}

// Load Pointer
instruct loadP(iRegP dst, memory mem) %{
  match(Set dst (LoadP mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "LDUW $mem,$dst\t! ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
#else
  format %{ "LDX $mem,$dst\t! ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
#endif
  ins_pipe(iload_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegN dst, memory mem) %{
  match(Set dst (LoadN mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW $mem,$dst\t! compressed ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "LDUW $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
#else
  format %{ "LDX $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
#endif
  ins_pipe(iload_mem);
%}

// Load narrow Klass Pointer
instruct loadNKlass(iRegN dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDDF $mem,$dst" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadD_mem);
%}

// Load Double - UNaligned
// Two single-precision loads into the hi/lo halves of the double register.
instruct loadD_unaligned(regD_low dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(8);
  format %{ "LDF $mem ,$dst.hi\t! misaligned double\n"
            "\tLDF $mem+4,$dst.lo\t!" %}
  opcode(Assembler::ldf_op3);
  ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDF $mem,$dst" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}

// Load Constant
instruct loadConI( iRegI dst, immI src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst" %}
  ins_encode( Set32(src, dst) );
  ins_pipe(ialu_hi_lo_reg);
%}

// Constant that fits in a simm13: single MOV.
instruct loadConI13( iRegI dst, immI13 src ) %{
  match(Set dst src);

  size(4);
  format %{ "MOV $src,$dst" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

#ifndef _LP64
// Materialize a pointer constant; relocation type decides how it is emitted.
instruct loadConP(iRegP dst, immP con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t!ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else {  // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}
#else
// 64-bit: cheap pointer constants are SET in registers (immP_set) ...
instruct loadConP_set(iRegP dst, immP_set con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else {  // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}

// ... expensive ones (immP_load) come from the constant table instead.
instruct loadConP_load(iRegP dst, immP_load con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConP);
%}

instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! non-oop ptr" %}
  ins_encode %{
    __ set($con$$constant, $dst$$Register);
  %}
  ins_pipe(loadConP);
%}
#endif // _LP64

instruct loadConP0(iRegP dst, immP0 src) %{
  match(Set dst src);

  size(4);
  format %{ "CLR $dst\t!ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

// Polling-page address: only the high bits matter, so a single SETHI suffices.
instruct loadConP_poll(iRegP dst, immP_poll src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "SET $src,$dst\t!ptr" %}
  ins_encode %{
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, reg_to_register_object($dst$$reg));
  %}
  ins_pipe(loadConP_poll);
%}

instruct loadConN0(iRegN dst, immN0 src) %{
  match(Set dst src);

  size(4);
  format %{ "CLR $dst\t! compressed NULL ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

instruct loadConN(iRegN dst, immN src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop((jobject)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

instruct loadConNKlass(iRegN dst, immNKlass src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass((Klass*)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

// Materialize long value (predicated by immL_cheap).
instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  ins_cost(DEFAULT_COST * 3);
  format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
  ins_encode %{
    __ set64($con$$constant, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(loadConL);
%}

// Load long value from constant table (predicated by immL_expensive).
instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ldx($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConL);
%}

instruct loadConL0( iRegL dst, immL0 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "CLR $dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

instruct loadConL13( iRegL dst, immL13 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);

  size(4);
  format %{ "MOV $src,$dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

// Float/double constants always come from the constant table; O7 is
// clobbered as scratch for an out-of-range table offset.
instruct loadConF(regF dst, immF con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
  %}
  ins_pipe(loadConFD);
%}

instruct loadConD(regD dst, immD con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchr( memory mem ) %{
  match( PrefetchRead mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PREFETCH $mem,0\t! Prefetch read-many" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_read( mem ) );
  ins_pipe(iload_mem);
%}

instruct prefetchw( memory mem ) %{
  match( PrefetchWrite mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Prefetch instructions for allocation.

instruct prefetchAlloc( memory mem ) %{
  predicate(AllocatePrefetchInstr == 0);
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Use BIS instruction to prefetch for allocation.
// Could fault, need space at the end of TLAB.
instruct prefetchAlloc_bis( iRegP dst ) %{
  predicate(AllocatePrefetchInstr == 1);
  match( PrefetchAllocation dst );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
  ins_encode %{
    __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  %}
  ins_pipe(istore_mem_reg);
%}

// Next code is used for finding next cache line address to prefetch.
#ifndef _LP64
instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
  match(Set dst (CastX2P (AndI (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif

//----------Store Instructions-------------------------------------------------
// Store Byte
instruct storeB(memory mem, iRegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Zero stores use hardwired-zero register G0 instead of materializing 0.
instruct storeB0(memory mem, immI0 src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

instruct storeCM0(memory mem, immI0 src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Char/Short
instruct storeC(memory mem, iRegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeC0(memory mem, immI0 src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer
instruct storeI(memory mem, iRegI src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Long
instruct storeL(memory mem, iRegL src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$mem\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeI0(memory mem, immI0 src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

instruct storeL0(memory mem, immL0 src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer from float register (used after fstoi)
instruct storeI_Freg(memory mem, regF src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store Pointer
instruct storeP(memory dst, sp_ptr_RegP src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_spORreg);
%}

instruct storeP0(memory dst, immP0 src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Compressed Pointer
instruct storeN(memory dst, iRegN src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    // Pick register+register vs. register+displacement addressing.
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

instruct storeNKlass(memory dst, iRegN src) %{
  match(Set dst (StoreNKlass dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

instruct storeN0(memory dst, immN0 src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    if (index != G0) {
      __ stw(0, base, index);
    } else {
      __ stw(0, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_zero);
%}

// Store Double
instruct storeD( memory mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STDF $src,$mem" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreD_mem_reg);
%}

// Zero double: store G0 via the integer pipe (STX) rather than an FP store.
instruct storeD0( memory mem, immD0 src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreD_mem_zero);
%}

// Store Float
instruct storeF( memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STF $src,$mem" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

instruct storeF0( memory mem, immF0 src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem\t! storeF0" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreF_mem_zero);
%}

// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Known-not-null variant skips the null check inside the encode.
instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ encode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ decode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-acquire" %}
  ins_encode( enc_membar_acquire );
  ins_pipe(long_memory_op);
%}

instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-release" %}
  ins_encode( enc_membar_release );
  ins_pipe(long_memory_op);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-volatile" %}
  ins_encode( enc_membar_volatile );
  ins_pipe(long_memory_op);
%}

// Elided when a following CAS/store already provides the ordering.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-storestore (empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

//----------Register Move Instructions-----------------------------------------
instruct roundDouble_nop(regD dst) %{
  match(Set dst (RoundDouble dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}


instruct roundFloat_nop(regF dst) %{
  match(Set dst (RoundFloat dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}


// Cast Index to Pointer for unsafe natives
instruct castX2P(iRegX src, iRegP dst) %{
  match(Set dst (CastX2P src));

  format %{ "MOV $src,$dst\t! IntX->Ptr" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

// Cast Pointer to Index for unsafe natives
instruct castP2X(iRegP src, iRegX dst) %{
  match(Set dst (CastP2X src));

  format %{ "MOV $src,$dst\t! Ptr->IntX" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

instruct stfSSD(stackSlotD stkSlot, regD src) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
  match(Set stkSlot src);  // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "STDF $src,$stkSlot\t!stk" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, src));
  ins_pipe(fstoreD_stk_reg);
%}

instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6724 match(Set dst stkSlot); // chain rule 6725 ins_cost(MEMORY_REF_COST); 6726 format %{ "LDDF $stkSlot,$dst\t!stk" %} 6727 opcode(Assembler::lddf_op3); 6728 ins_encode(simple_form3_mem_reg(stkSlot, dst)); 6729 ins_pipe(floadD_stk); 6730 %} 6731 6732 instruct stfSSF(stackSlotF stkSlot, regF src) %{ 6733 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6734 match(Set stkSlot src); // chain rule 6735 ins_cost(MEMORY_REF_COST); 6736 format %{ "STF $src,$stkSlot\t!stk" %} 6737 opcode(Assembler::stf_op3); 6738 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6739 ins_pipe(fstoreF_stk_reg); 6740 %} 6741 6742 //----------Conditional Move--------------------------------------------------- 6743 // Conditional move 6744 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{ 6745 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6746 ins_cost(150); 6747 format %{ "MOV$cmp $pcc,$src,$dst" %} 6748 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6749 ins_pipe(ialu_reg); 6750 %} 6751 6752 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{ 6753 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6754 ins_cost(140); 6755 format %{ "MOV$cmp $pcc,$src,$dst" %} 6756 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6757 ins_pipe(ialu_imm); 6758 %} 6759 6760 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{ 6761 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6762 ins_cost(150); 6763 size(4); 6764 format %{ "MOV$cmp $icc,$src,$dst" %} 6765 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6766 ins_pipe(ialu_reg); 6767 %} 6768 6769 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{ 6770 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6771 ins_cost(140); 6772 size(4); 6773 format %{ "MOV$cmp $icc,$src,$dst" %} 6774 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6775 ins_pipe(ialu_imm); 6776 %} 6777 6778 
// Int cmov on unsigned integer condition codes, register source.
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

// Int cmov on unsigned integer condition codes, immediate source.
instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

// Int cmov on float condition codes, register source.
instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}

// Int cmov on float condition codes, immediate source.
instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_imm);
%}

// Conditional move for RegN. Only cmov(reg,reg).
instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

// This instruction also works with CmpN so we don't need cmovNN_reg.
instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

// This instruction also works with CmpN so we don't need cmovNN_reg.
instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

// Narrow-oop cmov on float condition codes.
instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}

// Conditional move
instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

// Pointer cmov with immediate source; immP0 means only NULL can be
// moved this way.
instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_imm);
%}

// This instruction also works with CmpN so we don't need cmovPN_reg.
instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

// NOTE(review): this is a reg-reg cmov but uses ins_pipe(ialu_imm), unlike
// cmovPIu_reg (ialu_reg). Looks like a copy-paste quirk — affects scheduling
// only, not correctness; verify against other ports before changing.
instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_imm);
%}

instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_imm);
%}

// Conditional move
// Float cmov under pointer condition codes.
// NOTE(review): format string says FMOVD although the operands are regF —
// appears to be a cosmetic mismatch in the disassembly text only; verify.
instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVD$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVS$cmp $icc,$src,$dst" %}
  opcode(0x101);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVS$cmp $icc,$src,$dst" %}
  opcode(0x101);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_float_move);
%}

// Conditional move,
instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FMOVF$cmp $fcc,$src,$dst" %}
  opcode(0x1);
  ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move
instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  opcode(0x102);
  format %{ "FMOVD$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVD$cmp $icc,$src,$dst" %}
  opcode(0x102);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVD$cmp $icc,$src,$dst" %}
  opcode(0x102);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move,
instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FMOVD$cmp $fcc,$src,$dst" %}
  opcode(0x2);
  ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move
instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}


instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}


instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}



//----------OS and Locking Instructions----------------------------------------

// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
// Zero-size, zero-cost: the current thread pointer permanently lives in G2.
instruct tlsLoadP(g2RegP dst) %{
  match(Set dst (ThreadLocal));

  size(0);
  ins_cost(0);
  format %{ "# TLS is in G2" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}

// Compiler-only type narrowing; emits no code.
instruct checkCastPP( iRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}


// Compiler-only pointer cast; emits no code.
instruct castPP( iRegP dst ) %{
  match(Set dst (CastPP dst));
  format %{ "# castPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}

// Compiler-only int cast; emits no code.
instruct castII( iRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "# castII of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe(empty);
%}

//----------Arithmetic Instructions--------------------------------------------
// Addition Instructions
// Register Addition
instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  ins_encode %{
    __ add($src1$$Register, $src2$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate Addition (13-bit signed immediate field of the ADD instruction)
instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Pointer Register Addition
instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Pointer Immediate Addition
instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst\t! long" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (AddL src1 con));

  size(4);
  format %{ "ADD $src1,$con,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

//----------Conditional_store--------------------------------------------------
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success. Implemented with a CASA on Sparc.

// LoadP-locked. Same as a regular pointer load when used with a compare-swap
instruct loadPLocked(iRegP dst, memory mem) %{
  match(Set dst (LoadPLocked mem));
  ins_cost(MEMORY_REF_COST);

#ifndef _LP64
  size(4);
  format %{ "LDUW $mem,$dst\t! ptr" %}
  opcode(Assembler::lduw_op3, 0, REGP_OP);
#else
  format %{ "LDX $mem,$dst\t! ptr" %}
  opcode(Assembler::ldx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( mem, dst ) );
  ins_pipe(iload_mem);
%}

// CAS the heap top; newval is pinned to G3 and clobbered by the CASA.
instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
  match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
            "CMP R_G3,$oldval\t\t! See if we made progress" %}
  ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// Conditional-store of an int value.
instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
  match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
            "CMP $oldval,$newval\t\t! See if we made progress" %}
  ins_encode( enc_cas(mem_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// Conditional-store of a long value.
instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
  match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
            "CMP $oldval,$newval\t\t! See if we made progress" %}
  ins_encode( enc_cas(mem_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// No flag versions for CompareAndSwap{P,I,L} because matcher can't match them

// 64-bit CAS returning a boolean result; O7 is used as scratch (KILL tmp1).
instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  predicate(VM_Version::supports_cx8());
  match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne xcc,R_G0,$res"
  %}
  ins_encode( enc_casx(mem_ptr, oldval, newval),
              enc_lflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}


// 32-bit CAS returning a boolean result.
instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne icc,R_G0,$res"
  %}
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}

// Pointer CAS: 64-bit CASXA under _LP64, 32-bit CASA otherwise.
instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
#ifdef _LP64
  predicate(VM_Version::supports_cx8());
#endif
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne xcc,R_G0,$res"
  %}
#ifdef _LP64
  ins_encode( enc_casx(mem_ptr, oldval, newval),
              enc_lflags_ne_to_boolean(res) );
#else
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
#endif
  ins_pipe( long_memory_op );
%}

// Narrow-oop CAS (always 32-bit).
instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne icc,R_G0,$res"
  %}
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}

// Atomic get-and-set of a 32-bit int (SWAP instruction).
instruct xchgI( memory mem, iRegI newval) %{
  match(Set newval (GetAndSetI mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}

// 32-bit-only pointer swap (pointers are 32-bit when !_LP64).
#ifndef _LP64
instruct xchgP( memory mem, iRegP newval) %{
  match(Set newval (GetAndSetP mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}
#endif

// Atomic get-and-set of a narrow oop (32-bit SWAP).
instruct xchgN( memory mem, iRegN newval) %{
  match(Set newval (GetAndSetN mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}

//---------------------
// Subtraction Instructions
// Register Subtraction
instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (SubI src1 src2));

  size(4);
  format %{ "SUB $src1,$src2,$dst" %}
  opcode(Assembler::sub_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (SubI src1 src2));

  size(4);
  format %{ "SUB $src1,$src2,$dst" %}
  opcode(Assembler::sub_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Integer negation: 0 - src2 emitted as SUB %g0,src2,dst.
instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
  match(Set dst (SubI zero src2));

  size(4);
  format %{ "NEG $src2,$dst" %}
  opcode(Assembler::sub_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
  ins_pipe(ialu_zero_reg);
%}

// Long subtraction
instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (SubL src1 src2));

  size(4);
  format %{ "SUB $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sub_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (SubL src1 con));

  size(4);
  format %{ "SUB $src1,$con,$dst\t! long" %}
  opcode(Assembler::sub_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Long negation
instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{
  match(Set dst (SubL zero src2));

  size(4);
  format %{ "NEG $src2,$dst\t! long" %}
  opcode(Assembler::sub_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
  ins_pipe(ialu_zero_reg);
%}

// Multiplication Instructions
// Integer Multiplication
// Register Multiplication
// Uses the 64-bit MULX: the low 32 bits of the 64-bit product equal the
// 32-bit product, so one instruction serves int multiply.
instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (MulI src1 src2));

  size(4);
  format %{ "MULX $src1,$src2,$dst" %}
  opcode(Assembler::mulx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(imul_reg_reg);
%}

// Immediate Multiplication
instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (MulI src1 src2));

  size(4);
  format %{ "MULX $src1,$src2,$dst" %}
  opcode(Assembler::mulx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(imul_reg_imm);
%}

instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));
  ins_cost(DEFAULT_COST * 5);
  size(4);
  format %{ "MULX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::mulx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(mulL_reg_reg);
%}

// Immediate Multiplication
instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
  match(Set dst (MulL src1 src2));
  ins_cost(DEFAULT_COST * 5);
  size(4);
  format %{ "MULX $src1,$src2,$dst" %}
  opcode(Assembler::mulx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(mulL_reg_imm);
%}

// Integer Division
// Register Division
// Sign-extends both 32-bit operands (SRA reg,0) and uses the 64-bit SDIVX;
// iRegIsafe operands because the inputs are modified in place.
instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost((2+71)*DEFAULT_COST);

  format %{ "SRA $src2,0,$src2\n\t"
            "SRA $src1,0,$src1\n\t"
            "SDIVX $src1,$src2,$dst" %}
  ins_encode( idiv_reg( src1, src2, dst ) );
  ins_pipe(sdiv_reg_reg);
%}

// Immediate Division
instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost((2+71)*DEFAULT_COST);

  format %{ "SRA $src1,0,$src1\n\t"
            "SDIVX $src1,$src2,$dst" %}
  ins_encode( idiv_imm( src1, src2, dst ) );
  ins_pipe(sdiv_reg_imm);
%}

//----------Div-By-10-Expansion------------------------------------------------
// Extract hi bits of a 32x32->64 bit multiply.
// Expand rule only, not matched
instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{
  effect( DEF dst, USE src1, USE src2 );
  format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t"
            "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %}
  ins_encode( enc_mul_hi(dst,src1,src2));
  ins_pipe(sdiv_reg_reg);
%}

// Magic constant, reciprocal of 10
instruct loadConI_x66666667(iRegIsafe dst) %{
  effect( DEF dst );

  size(8);
  format %{ "SET 0x66666667,$dst\t! Used in div-by-10" %}
  ins_encode( Set32(0x66666667, dst) );
  ins_pipe(ialu_hi_lo_reg);
%}

// Register Shift Right Arithmetic Long by 32-63
instruct sra_31( iRegI dst, iRegI src ) %{
  effect( DEF dst, USE src );
  format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
  ins_encode( form3_rs1_rd_copysign_hi(src,dst) );
  ins_pipe(ialu_reg_reg);
%}

// Arithmetic Shift Right by 8-bit immediate
instruct sra_reg_2( iRegI dst, iRegI src ) %{
  effect( DEF dst, USE src );
  format %{ "SRA $src,2,$dst\t! Used in div-by-10" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Integer DIV with 10
// Expanded to the classic multiply-by-reciprocal sequence:
// dst = (hi32(src * 0x66666667) >> 2) - (src >> 31).
instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{
  match(Set dst (DivI src div));
  ins_cost((6+6)*DEFAULT_COST);
  expand %{
    iRegIsafe tmp1;               // Killed temps;
    iRegIsafe tmp2;               // Killed temps;
    iRegI tmp3;                   // Killed temps;
    iRegI tmp4;                   // Killed temps;
    loadConI_x66666667( tmp1 );   // SET 0x66666667 -> tmp1
    mul_hi( tmp2, src, tmp1 );    // MUL hibits(src * tmp1) -> tmp2
    sra_31( tmp3, src );          // SRA src,31 -> tmp3
    sra_reg_2( tmp4, tmp2 );      // SRA tmp2,2 -> tmp4
    subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst
  %}
%}

// Register Long Division
instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(DEFAULT_COST*71);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_reg);
%}

// Register Long Division
instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(DEFAULT_COST*71);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_imm);
%}

// Integer Remainder
// Register Remainder
// O7 is used as a scratch register by the irem helper (KILL temp).
instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{
  match(Set dst (ModI src1 src2));
  effect( KILL ccr, KILL temp);

  format %{ "SREM $src1,$src2,$dst" %}
  ins_encode( irem_reg(src1, src2, dst, temp) );
  ins_pipe(sdiv_reg_reg);
%}

// Immediate Remainder
instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{
  match(Set dst (ModI src1 src2));
  effect( KILL ccr, KILL temp);

  format %{ "SREM $src1,$src2,$dst" %}
  ins_encode( irem_imm(src1, src2, dst, temp) );
  ins_pipe(sdiv_reg_imm);
%}

// Register Long Remainder
// The _1 instructions below have effect() but no match(): they are building
// blocks for expand rules (presumably the long-remainder expansion — the
// consuming rule is outside this view; verify).
instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_reg);
%}

// Register Long Division
instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_imm);
%}

instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "MULX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::mulx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(mulL_reg_reg);
%}

// Immediate Multiplication
instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "MULX $src1,$src2,$dst" %}
  opcode(Assembler::mulx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(mulL_reg_imm);
%}

instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "SUB $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sub_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "SUB $src1,$src2,$dst\t!
long" %} 7615 opcode(Assembler::sub_op3, Assembler::arith_op); 7616 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7617 ins_pipe(ialu_reg_reg); 7618 %} 7619 7620 // Register Long Remainder 7621 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7622 match(Set dst (ModL src1 src2)); 7623 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7624 expand %{ 7625 iRegL tmp1; 7626 iRegL tmp2; 7627 divL_reg_reg_1(tmp1, src1, src2); 7628 mulL_reg_reg_1(tmp2, tmp1, src2); 7629 subL_reg_reg_1(dst, src1, tmp2); 7630 %} 7631 %} 7632 7633 // Register Long Remainder 7634 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7635 match(Set dst (ModL src1 src2)); 7636 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7637 expand %{ 7638 iRegL tmp1; 7639 iRegL tmp2; 7640 divL_reg_imm13_1(tmp1, src1, src2); 7641 mulL_reg_imm13_1(tmp2, tmp1, src2); 7642 subL_reg_reg_2 (dst, src1, tmp2); 7643 %} 7644 %} 7645 7646 // Integer Shift Instructions 7647 // Register Shift Left 7648 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7649 match(Set dst (LShiftI src1 src2)); 7650 7651 size(4); 7652 format %{ "SLL $src1,$src2,$dst" %} 7653 opcode(Assembler::sll_op3, Assembler::arith_op); 7654 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7655 ins_pipe(ialu_reg_reg); 7656 %} 7657 7658 // Register Shift Left Immediate 7659 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7660 match(Set dst (LShiftI src1 src2)); 7661 7662 size(4); 7663 format %{ "SLL $src1,$src2,$dst" %} 7664 opcode(Assembler::sll_op3, Assembler::arith_op); 7665 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7666 ins_pipe(ialu_reg_imm); 7667 %} 7668 7669 // Register Shift Left 7670 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7671 match(Set dst (LShiftL src1 src2)); 7672 7673 size(4); 7674 format %{ "SLLX $src1,$src2,$dst" %} 7675 opcode(Assembler::sllx_op3, Assembler::arith_op); 7676 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7677 ins_pipe(ialu_reg_reg); 7678 %} 7679 7680 // 
Register Shift Left Immediate 7681 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7682 match(Set dst (LShiftL src1 src2)); 7683 7684 size(4); 7685 format %{ "SLLX $src1,$src2,$dst" %} 7686 opcode(Assembler::sllx_op3, Assembler::arith_op); 7687 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7688 ins_pipe(ialu_reg_imm); 7689 %} 7690 7691 // Register Arithmetic Shift Right 7692 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7693 match(Set dst (RShiftI src1 src2)); 7694 size(4); 7695 format %{ "SRA $src1,$src2,$dst" %} 7696 opcode(Assembler::sra_op3, Assembler::arith_op); 7697 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7698 ins_pipe(ialu_reg_reg); 7699 %} 7700 7701 // Register Arithmetic Shift Right Immediate 7702 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7703 match(Set dst (RShiftI src1 src2)); 7704 7705 size(4); 7706 format %{ "SRA $src1,$src2,$dst" %} 7707 opcode(Assembler::sra_op3, Assembler::arith_op); 7708 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7709 ins_pipe(ialu_reg_imm); 7710 %} 7711 7712 // Register Shift Right Arithmatic Long 7713 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7714 match(Set dst (RShiftL src1 src2)); 7715 7716 size(4); 7717 format %{ "SRAX $src1,$src2,$dst" %} 7718 opcode(Assembler::srax_op3, Assembler::arith_op); 7719 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7720 ins_pipe(ialu_reg_reg); 7721 %} 7722 7723 // Register Shift Left Immediate 7724 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7725 match(Set dst (RShiftL src1 src2)); 7726 7727 size(4); 7728 format %{ "SRAX $src1,$src2,$dst" %} 7729 opcode(Assembler::srax_op3, Assembler::arith_op); 7730 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7731 ins_pipe(ialu_reg_imm); 7732 %} 7733 7734 // Register Shift Right 7735 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7736 match(Set dst (URShiftI src1 src2)); 7737 7738 size(4); 7739 format %{ "SRL 
$src1,$src2,$dst" %} 7740 opcode(Assembler::srl_op3, Assembler::arith_op); 7741 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7742 ins_pipe(ialu_reg_reg); 7743 %} 7744 7745 // Register Shift Right Immediate 7746 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7747 match(Set dst (URShiftI src1 src2)); 7748 7749 size(4); 7750 format %{ "SRL $src1,$src2,$dst" %} 7751 opcode(Assembler::srl_op3, Assembler::arith_op); 7752 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7753 ins_pipe(ialu_reg_imm); 7754 %} 7755 7756 // Register Shift Right 7757 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7758 match(Set dst (URShiftL src1 src2)); 7759 7760 size(4); 7761 format %{ "SRLX $src1,$src2,$dst" %} 7762 opcode(Assembler::srlx_op3, Assembler::arith_op); 7763 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7764 ins_pipe(ialu_reg_reg); 7765 %} 7766 7767 // Register Shift Right Immediate 7768 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7769 match(Set dst (URShiftL src1 src2)); 7770 7771 size(4); 7772 format %{ "SRLX $src1,$src2,$dst" %} 7773 opcode(Assembler::srlx_op3, Assembler::arith_op); 7774 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7775 ins_pipe(ialu_reg_imm); 7776 %} 7777 7778 // Register Shift Right Immediate with a CastP2X 7779 #ifdef _LP64 7780 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ 7781 match(Set dst (URShiftL (CastP2X src1) src2)); 7782 size(4); 7783 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %} 7784 opcode(Assembler::srlx_op3, Assembler::arith_op); 7785 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7786 ins_pipe(ialu_reg_imm); 7787 %} 7788 #else 7789 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{ 7790 match(Set dst (URShiftI (CastP2X src1) src2)); 7791 size(4); 7792 format %{ "SRL $src1,$src2,$dst\t! 
Cast ptr $src1 to int and shift" %} 7793 opcode(Assembler::srl_op3, Assembler::arith_op); 7794 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7795 ins_pipe(ialu_reg_imm); 7796 %} 7797 #endif 7798 7799 7800 //----------Floating Point Arithmetic Instructions----------------------------- 7801 7802 // Add float single precision 7803 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 7804 match(Set dst (AddF src1 src2)); 7805 7806 size(4); 7807 format %{ "FADDS $src1,$src2,$dst" %} 7808 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf); 7809 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7810 ins_pipe(faddF_reg_reg); 7811 %} 7812 7813 // Add float double precision 7814 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 7815 match(Set dst (AddD src1 src2)); 7816 7817 size(4); 7818 format %{ "FADDD $src1,$src2,$dst" %} 7819 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 7820 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7821 ins_pipe(faddD_reg_reg); 7822 %} 7823 7824 // Sub float single precision 7825 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 7826 match(Set dst (SubF src1 src2)); 7827 7828 size(4); 7829 format %{ "FSUBS $src1,$src2,$dst" %} 7830 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf); 7831 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7832 ins_pipe(faddF_reg_reg); 7833 %} 7834 7835 // Sub float double precision 7836 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 7837 match(Set dst (SubD src1 src2)); 7838 7839 size(4); 7840 format %{ "FSUBD $src1,$src2,$dst" %} 7841 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 7842 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7843 ins_pipe(faddD_reg_reg); 7844 %} 7845 7846 // Mul float single precision 7847 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 7848 match(Set dst (MulF src1 src2)); 7849 7850 size(4); 7851 format %{ "FMULS $src1,$src2,$dst" 
%} 7852 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf); 7853 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7854 ins_pipe(fmulF_reg_reg); 7855 %} 7856 7857 // Mul float double precision 7858 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 7859 match(Set dst (MulD src1 src2)); 7860 7861 size(4); 7862 format %{ "FMULD $src1,$src2,$dst" %} 7863 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 7864 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7865 ins_pipe(fmulD_reg_reg); 7866 %} 7867 7868 // Div float single precision 7869 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 7870 match(Set dst (DivF src1 src2)); 7871 7872 size(4); 7873 format %{ "FDIVS $src1,$src2,$dst" %} 7874 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf); 7875 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7876 ins_pipe(fdivF_reg_reg); 7877 %} 7878 7879 // Div float double precision 7880 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 7881 match(Set dst (DivD src1 src2)); 7882 7883 size(4); 7884 format %{ "FDIVD $src1,$src2,$dst" %} 7885 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf); 7886 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7887 ins_pipe(fdivD_reg_reg); 7888 %} 7889 7890 // Absolute float double precision 7891 instruct absD_reg(regD dst, regD src) %{ 7892 match(Set dst (AbsD src)); 7893 7894 format %{ "FABSd $src,$dst" %} 7895 ins_encode(fabsd(dst, src)); 7896 ins_pipe(faddD_reg); 7897 %} 7898 7899 // Absolute float single precision 7900 instruct absF_reg(regF dst, regF src) %{ 7901 match(Set dst (AbsF src)); 7902 7903 format %{ "FABSs $src,$dst" %} 7904 ins_encode(fabss(dst, src)); 7905 ins_pipe(faddF_reg); 7906 %} 7907 7908 instruct negF_reg(regF dst, regF src) %{ 7909 match(Set dst (NegF src)); 7910 7911 size(4); 7912 format %{ "FNEGs $src,$dst" %} 7913 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); 7914 
ins_encode(form3_opf_rs2F_rdF(src, dst)); 7915 ins_pipe(faddF_reg); 7916 %} 7917 7918 instruct negD_reg(regD dst, regD src) %{ 7919 match(Set dst (NegD src)); 7920 7921 format %{ "FNEGd $src,$dst" %} 7922 ins_encode(fnegd(dst, src)); 7923 ins_pipe(faddD_reg); 7924 %} 7925 7926 // Sqrt float double precision 7927 instruct sqrtF_reg_reg(regF dst, regF src) %{ 7928 match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); 7929 7930 size(4); 7931 format %{ "FSQRTS $src,$dst" %} 7932 ins_encode(fsqrts(dst, src)); 7933 ins_pipe(fdivF_reg_reg); 7934 %} 7935 7936 // Sqrt float double precision 7937 instruct sqrtD_reg_reg(regD dst, regD src) %{ 7938 match(Set dst (SqrtD src)); 7939 7940 size(4); 7941 format %{ "FSQRTD $src,$dst" %} 7942 ins_encode(fsqrtd(dst, src)); 7943 ins_pipe(fdivD_reg_reg); 7944 %} 7945 7946 //----------Logical Instructions----------------------------------------------- 7947 // And Instructions 7948 // Register And 7949 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7950 match(Set dst (AndI src1 src2)); 7951 7952 size(4); 7953 format %{ "AND $src1,$src2,$dst" %} 7954 opcode(Assembler::and_op3, Assembler::arith_op); 7955 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7956 ins_pipe(ialu_reg_reg); 7957 %} 7958 7959 // Immediate And 7960 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7961 match(Set dst (AndI src1 src2)); 7962 7963 size(4); 7964 format %{ "AND $src1,$src2,$dst" %} 7965 opcode(Assembler::and_op3, Assembler::arith_op); 7966 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7967 ins_pipe(ialu_reg_imm); 7968 %} 7969 7970 // Register And Long 7971 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7972 match(Set dst (AndL src1 src2)); 7973 7974 ins_cost(DEFAULT_COST); 7975 size(4); 7976 format %{ "AND $src1,$src2,$dst\t! 
long" %} 7977 opcode(Assembler::and_op3, Assembler::arith_op); 7978 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7979 ins_pipe(ialu_reg_reg); 7980 %} 7981 7982 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7983 match(Set dst (AndL src1 con)); 7984 7985 ins_cost(DEFAULT_COST); 7986 size(4); 7987 format %{ "AND $src1,$con,$dst\t! long" %} 7988 opcode(Assembler::and_op3, Assembler::arith_op); 7989 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7990 ins_pipe(ialu_reg_imm); 7991 %} 7992 7993 // Or Instructions 7994 // Register Or 7995 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7996 match(Set dst (OrI src1 src2)); 7997 7998 size(4); 7999 format %{ "OR $src1,$src2,$dst" %} 8000 opcode(Assembler::or_op3, Assembler::arith_op); 8001 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8002 ins_pipe(ialu_reg_reg); 8003 %} 8004 8005 // Immediate Or 8006 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8007 match(Set dst (OrI src1 src2)); 8008 8009 size(4); 8010 format %{ "OR $src1,$src2,$dst" %} 8011 opcode(Assembler::or_op3, Assembler::arith_op); 8012 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8013 ins_pipe(ialu_reg_imm); 8014 %} 8015 8016 // Register Or Long 8017 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8018 match(Set dst (OrL src1 src2)); 8019 8020 ins_cost(DEFAULT_COST); 8021 size(4); 8022 format %{ "OR $src1,$src2,$dst\t! long" %} 8023 opcode(Assembler::or_op3, Assembler::arith_op); 8024 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8025 ins_pipe(ialu_reg_reg); 8026 %} 8027 8028 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8029 match(Set dst (OrL src1 con)); 8030 ins_cost(DEFAULT_COST*2); 8031 8032 ins_cost(DEFAULT_COST); 8033 size(4); 8034 format %{ "OR $src1,$con,$dst\t! 
long" %} 8035 opcode(Assembler::or_op3, Assembler::arith_op); 8036 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8037 ins_pipe(ialu_reg_imm); 8038 %} 8039 8040 #ifndef _LP64 8041 8042 // Use sp_ptr_RegP to match G2 (TLS register) without spilling. 8043 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{ 8044 match(Set dst (OrI src1 (CastP2X src2))); 8045 8046 size(4); 8047 format %{ "OR $src1,$src2,$dst" %} 8048 opcode(Assembler::or_op3, Assembler::arith_op); 8049 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8050 ins_pipe(ialu_reg_reg); 8051 %} 8052 8053 #else 8054 8055 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ 8056 match(Set dst (OrL src1 (CastP2X src2))); 8057 8058 ins_cost(DEFAULT_COST); 8059 size(4); 8060 format %{ "OR $src1,$src2,$dst\t! long" %} 8061 opcode(Assembler::or_op3, Assembler::arith_op); 8062 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8063 ins_pipe(ialu_reg_reg); 8064 %} 8065 8066 #endif 8067 8068 // Xor Instructions 8069 // Register Xor 8070 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8071 match(Set dst (XorI src1 src2)); 8072 8073 size(4); 8074 format %{ "XOR $src1,$src2,$dst" %} 8075 opcode(Assembler::xor_op3, Assembler::arith_op); 8076 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8077 ins_pipe(ialu_reg_reg); 8078 %} 8079 8080 // Immediate Xor 8081 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8082 match(Set dst (XorI src1 src2)); 8083 8084 size(4); 8085 format %{ "XOR $src1,$src2,$dst" %} 8086 opcode(Assembler::xor_op3, Assembler::arith_op); 8087 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8088 ins_pipe(ialu_reg_imm); 8089 %} 8090 8091 // Register Xor Long 8092 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8093 match(Set dst (XorL src1 src2)); 8094 8095 ins_cost(DEFAULT_COST); 8096 size(4); 8097 format %{ "XOR $src1,$src2,$dst\t! 
long" %} 8098 opcode(Assembler::xor_op3, Assembler::arith_op); 8099 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8100 ins_pipe(ialu_reg_reg); 8101 %} 8102 8103 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8104 match(Set dst (XorL src1 con)); 8105 8106 ins_cost(DEFAULT_COST); 8107 size(4); 8108 format %{ "XOR $src1,$con,$dst\t! long" %} 8109 opcode(Assembler::xor_op3, Assembler::arith_op); 8110 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8111 ins_pipe(ialu_reg_imm); 8112 %} 8113 8114 //----------Convert to Boolean------------------------------------------------- 8115 // Nice hack for 32-bit tests but doesn't work for 8116 // 64-bit pointers. 8117 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{ 8118 match(Set dst (Conv2B src)); 8119 effect( KILL ccr ); 8120 ins_cost(DEFAULT_COST*2); 8121 format %{ "CMP R_G0,$src\n\t" 8122 "ADDX R_G0,0,$dst" %} 8123 ins_encode( enc_to_bool( src, dst ) ); 8124 ins_pipe(ialu_reg_ialu); 8125 %} 8126 8127 #ifndef _LP64 8128 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{ 8129 match(Set dst (Conv2B src)); 8130 effect( KILL ccr ); 8131 ins_cost(DEFAULT_COST*2); 8132 format %{ "CMP R_G0,$src\n\t" 8133 "ADDX R_G0,0,$dst" %} 8134 ins_encode( enc_to_bool( src, dst ) ); 8135 ins_pipe(ialu_reg_ialu); 8136 %} 8137 #else 8138 instruct convP2B( iRegI dst, iRegP src ) %{ 8139 match(Set dst (Conv2B src)); 8140 ins_cost(DEFAULT_COST*2); 8141 format %{ "MOV $src,$dst\n\t" 8142 "MOVRNZ $src,1,$dst" %} 8143 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); 8144 ins_pipe(ialu_clr_and_mover); 8145 %} 8146 #endif 8147 8148 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ 8149 match(Set dst (CmpLTMask src zero)); 8150 effect(KILL ccr); 8151 size(4); 8152 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %} 8153 ins_encode %{ 8154 __ sra($src$$Register, 31, $dst$$Register); 8155 %} 8156 ins_pipe(ialu_reg_imm); 8157 %} 8158 8159 instruct cmpLTMask_reg_reg( iRegI dst, 
iRegI p, iRegI q, flagsReg ccr ) %{ 8160 match(Set dst (CmpLTMask p q)); 8161 effect( KILL ccr ); 8162 ins_cost(DEFAULT_COST*4); 8163 format %{ "CMP $p,$q\n\t" 8164 "MOV #0,$dst\n\t" 8165 "BLT,a .+8\n\t" 8166 "MOV #-1,$dst" %} 8167 ins_encode( enc_ltmask(p,q,dst) ); 8168 ins_pipe(ialu_reg_reg_ialu); 8169 %} 8170 8171 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ 8172 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 8173 effect(KILL ccr, TEMP tmp); 8174 ins_cost(DEFAULT_COST*3); 8175 8176 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" 8177 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" 8178 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} 8179 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); 8180 ins_pipe(cadd_cmpltmask); 8181 %} 8182 8183 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ 8184 match(Set p (AndI (CmpLTMask p q) y)); 8185 effect(KILL ccr); 8186 ins_cost(DEFAULT_COST*3); 8187 8188 format %{ "CMP $p,$q\n\t" 8189 "MOV $y,$p\n\t" 8190 "MOVge G0,$p" %} 8191 ins_encode %{ 8192 __ cmp($p$$Register, $q$$Register); 8193 __ mov($y$$Register, $p$$Register); 8194 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); 8195 %} 8196 ins_pipe(ialu_reg_reg_ialu); 8197 %} 8198 8199 //----------------------------------------------------------------- 8200 // Direct raw moves between float and general registers using VIS3. 8201 8202 // ins_pipe(faddF_reg); 8203 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{ 8204 predicate(UseVIS >= 3); 8205 match(Set dst (MoveF2I src)); 8206 8207 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %} 8208 ins_encode %{ 8209 __ movstouw($src$$FloatRegister, $dst$$Register); 8210 %} 8211 ins_pipe(ialu_reg_reg); 8212 %} 8213 8214 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{ 8215 predicate(UseVIS >= 3); 8216 match(Set dst (MoveI2F src)); 8217 8218 format %{ "MOVWTOS $src,$dst\t! 
MoveI2F" %} 8219 ins_encode %{ 8220 __ movwtos($src$$Register, $dst$$FloatRegister); 8221 %} 8222 ins_pipe(ialu_reg_reg); 8223 %} 8224 8225 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{ 8226 predicate(UseVIS >= 3); 8227 match(Set dst (MoveD2L src)); 8228 8229 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %} 8230 ins_encode %{ 8231 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register); 8232 %} 8233 ins_pipe(ialu_reg_reg); 8234 %} 8235 8236 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{ 8237 predicate(UseVIS >= 3); 8238 match(Set dst (MoveL2D src)); 8239 8240 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %} 8241 ins_encode %{ 8242 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg)); 8243 %} 8244 ins_pipe(ialu_reg_reg); 8245 %} 8246 8247 8248 // Raw moves between float and general registers using stack. 8249 8250 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ 8251 match(Set dst (MoveF2I src)); 8252 effect(DEF dst, USE src); 8253 ins_cost(MEMORY_REF_COST); 8254 8255 size(4); 8256 format %{ "LDUW $src,$dst\t! MoveF2I" %} 8257 opcode(Assembler::lduw_op3); 8258 ins_encode(simple_form3_mem_reg( src, dst ) ); 8259 ins_pipe(iload_mem); 8260 %} 8261 8262 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 8263 match(Set dst (MoveI2F src)); 8264 effect(DEF dst, USE src); 8265 ins_cost(MEMORY_REF_COST); 8266 8267 size(4); 8268 format %{ "LDF $src,$dst\t! MoveI2F" %} 8269 opcode(Assembler::ldf_op3); 8270 ins_encode(simple_form3_mem_reg(src, dst)); 8271 ins_pipe(floadF_stk); 8272 %} 8273 8274 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{ 8275 match(Set dst (MoveD2L src)); 8276 effect(DEF dst, USE src); 8277 ins_cost(MEMORY_REF_COST); 8278 8279 size(4); 8280 format %{ "LDX $src,$dst\t! 
MoveD2L" %} 8281 opcode(Assembler::ldx_op3); 8282 ins_encode(simple_form3_mem_reg( src, dst ) ); 8283 ins_pipe(iload_mem); 8284 %} 8285 8286 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 8287 match(Set dst (MoveL2D src)); 8288 effect(DEF dst, USE src); 8289 ins_cost(MEMORY_REF_COST); 8290 8291 size(4); 8292 format %{ "LDDF $src,$dst\t! MoveL2D" %} 8293 opcode(Assembler::lddf_op3); 8294 ins_encode(simple_form3_mem_reg(src, dst)); 8295 ins_pipe(floadD_stk); 8296 %} 8297 8298 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 8299 match(Set dst (MoveF2I src)); 8300 effect(DEF dst, USE src); 8301 ins_cost(MEMORY_REF_COST); 8302 8303 size(4); 8304 format %{ "STF $src,$dst\t! MoveF2I" %} 8305 opcode(Assembler::stf_op3); 8306 ins_encode(simple_form3_mem_reg(dst, src)); 8307 ins_pipe(fstoreF_stk_reg); 8308 %} 8309 8310 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ 8311 match(Set dst (MoveI2F src)); 8312 effect(DEF dst, USE src); 8313 ins_cost(MEMORY_REF_COST); 8314 8315 size(4); 8316 format %{ "STW $src,$dst\t! MoveI2F" %} 8317 opcode(Assembler::stw_op3); 8318 ins_encode(simple_form3_mem_reg( dst, src ) ); 8319 ins_pipe(istore_mem_reg); 8320 %} 8321 8322 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 8323 match(Set dst (MoveD2L src)); 8324 effect(DEF dst, USE src); 8325 ins_cost(MEMORY_REF_COST); 8326 8327 size(4); 8328 format %{ "STDF $src,$dst\t! MoveD2L" %} 8329 opcode(Assembler::stdf_op3); 8330 ins_encode(simple_form3_mem_reg(dst, src)); 8331 ins_pipe(fstoreD_stk_reg); 8332 %} 8333 8334 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ 8335 match(Set dst (MoveL2D src)); 8336 effect(DEF dst, USE src); 8337 ins_cost(MEMORY_REF_COST); 8338 8339 size(4); 8340 format %{ "STX $src,$dst\t! 
MoveL2D" %} 8341 opcode(Assembler::stx_op3); 8342 ins_encode(simple_form3_mem_reg( dst, src ) ); 8343 ins_pipe(istore_mem_reg); 8344 %} 8345 8346 8347 //----------Arithmetic Conversion Instructions--------------------------------- 8348 // The conversions operations are all Alpha sorted. Please keep it that way! 8349 8350 instruct convD2F_reg(regF dst, regD src) %{ 8351 match(Set dst (ConvD2F src)); 8352 size(4); 8353 format %{ "FDTOS $src,$dst" %} 8354 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); 8355 ins_encode(form3_opf_rs2D_rdF(src, dst)); 8356 ins_pipe(fcvtD2F); 8357 %} 8358 8359 8360 // Convert a double to an int in a float register. 8361 // If the double is a NAN, stuff a zero in instead. 8362 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ 8363 effect(DEF dst, USE src, KILL fcc0); 8364 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8365 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8366 "FDTOI $src,$dst\t! convert in delay slot\n\t" 8367 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8368 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8369 "skip:" %} 8370 ins_encode(form_d2i_helper(src,dst)); 8371 ins_pipe(fcvtD2I); 8372 %} 8373 8374 instruct convD2I_stk(stackSlotI dst, regD src) %{ 8375 match(Set dst (ConvD2I src)); 8376 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8377 expand %{ 8378 regF tmp; 8379 convD2I_helper(tmp, src); 8380 regF_to_stkI(dst, tmp); 8381 %} 8382 %} 8383 8384 instruct convD2I_reg(iRegI dst, regD src) %{ 8385 predicate(UseVIS >= 3); 8386 match(Set dst (ConvD2I src)); 8387 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8388 expand %{ 8389 regF tmp; 8390 convD2I_helper(tmp, src); 8391 MoveF2I_reg_reg(dst, tmp); 8392 %} 8393 %} 8394 8395 8396 // Convert a double to a long in a double register. 8397 // If the double is a NAN, stuff a zero in instead. 
8398 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{ 8399 effect(DEF dst, USE src, KILL fcc0); 8400 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8401 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8402 "FDTOX $src,$dst\t! convert in delay slot\n\t" 8403 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8404 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" 8405 "skip:" %} 8406 ins_encode(form_d2l_helper(src,dst)); 8407 ins_pipe(fcvtD2L); 8408 %} 8409 8410 instruct convD2L_stk(stackSlotL dst, regD src) %{ 8411 match(Set dst (ConvD2L src)); 8412 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8413 expand %{ 8414 regD tmp; 8415 convD2L_helper(tmp, src); 8416 regD_to_stkL(dst, tmp); 8417 %} 8418 %} 8419 8420 instruct convD2L_reg(iRegL dst, regD src) %{ 8421 predicate(UseVIS >= 3); 8422 match(Set dst (ConvD2L src)); 8423 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8424 expand %{ 8425 regD tmp; 8426 convD2L_helper(tmp, src); 8427 MoveD2L_reg_reg(dst, tmp); 8428 %} 8429 %} 8430 8431 8432 instruct convF2D_reg(regD dst, regF src) %{ 8433 match(Set dst (ConvF2D src)); 8434 format %{ "FSTOD $src,$dst" %} 8435 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf); 8436 ins_encode(form3_opf_rs2F_rdD(src, dst)); 8437 ins_pipe(fcvtF2D); 8438 %} 8439 8440 8441 // Convert a float to an int in a float register. 8442 // If the float is a NAN, stuff a zero in instead. 8443 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{ 8444 effect(DEF dst, USE src, KILL fcc0); 8445 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8446 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8447 "FSTOI $src,$dst\t! convert in delay slot\n\t" 8448 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8449 "FSUBs $dst,$dst,$dst\t! 
cleared only if nan\n" 8450 "skip:" %} 8451 ins_encode(form_f2i_helper(src,dst)); 8452 ins_pipe(fcvtF2I); 8453 %} 8454 8455 instruct convF2I_stk(stackSlotI dst, regF src) %{ 8456 match(Set dst (ConvF2I src)); 8457 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8458 expand %{ 8459 regF tmp; 8460 convF2I_helper(tmp, src); 8461 regF_to_stkI(dst, tmp); 8462 %} 8463 %} 8464 8465 instruct convF2I_reg(iRegI dst, regF src) %{ 8466 predicate(UseVIS >= 3); 8467 match(Set dst (ConvF2I src)); 8468 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8469 expand %{ 8470 regF tmp; 8471 convF2I_helper(tmp, src); 8472 MoveF2I_reg_reg(dst, tmp); 8473 %} 8474 %} 8475 8476 8477 // Convert a float to a long in a float register. 8478 // If the float is a NAN, stuff a zero in instead. 8479 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{ 8480 effect(DEF dst, USE src, KILL fcc0); 8481 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8482 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8483 "FSTOX $src,$dst\t! convert in delay slot\n\t" 8484 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8485 "FSUBd $dst,$dst,$dst\t! 
cleared only if nan\n" 8486 "skip:" %} 8487 ins_encode(form_f2l_helper(src,dst)); 8488 ins_pipe(fcvtF2L); 8489 %} 8490 8491 instruct convF2L_stk(stackSlotL dst, regF src) %{ 8492 match(Set dst (ConvF2L src)); 8493 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8494 expand %{ 8495 regD tmp; 8496 convF2L_helper(tmp, src); 8497 regD_to_stkL(dst, tmp); 8498 %} 8499 %} 8500 8501 instruct convF2L_reg(iRegL dst, regF src) %{ 8502 predicate(UseVIS >= 3); 8503 match(Set dst (ConvF2L src)); 8504 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8505 expand %{ 8506 regD tmp; 8507 convF2L_helper(tmp, src); 8508 MoveD2L_reg_reg(dst, tmp); 8509 %} 8510 %} 8511 8512 8513 instruct convI2D_helper(regD dst, regF tmp) %{ 8514 effect(USE tmp, DEF dst); 8515 format %{ "FITOD $tmp,$dst" %} 8516 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8517 ins_encode(form3_opf_rs2F_rdD(tmp, dst)); 8518 ins_pipe(fcvtI2D); 8519 %} 8520 8521 instruct convI2D_stk(stackSlotI src, regD dst) %{ 8522 match(Set dst (ConvI2D src)); 8523 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8524 expand %{ 8525 regF tmp; 8526 stkI_to_regF(tmp, src); 8527 convI2D_helper(dst, tmp); 8528 %} 8529 %} 8530 8531 instruct convI2D_reg(regD_low dst, iRegI src) %{ 8532 predicate(UseVIS >= 3); 8533 match(Set dst (ConvI2D src)); 8534 expand %{ 8535 regF tmp; 8536 MoveI2F_reg_reg(tmp, src); 8537 convI2D_helper(dst, tmp); 8538 %} 8539 %} 8540 8541 instruct convI2D_mem(regD_low dst, memory mem) %{ 8542 match(Set dst (ConvI2D (LoadI mem))); 8543 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8544 size(8); 8545 format %{ "LDF $mem,$dst\n\t" 8546 "FITOD $dst,$dst" %} 8547 opcode(Assembler::ldf_op3, Assembler::fitod_opf); 8548 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8549 ins_pipe(floadF_mem); 8550 %} 8551 8552 8553 instruct convI2F_helper(regF dst, regF tmp) %{ 8554 effect(DEF dst, USE tmp); 8555 format %{ "FITOS $tmp,$dst" %} 8556 opcode(Assembler::fpop1_op3, Assembler::arith_op, 
Assembler::fitos_opf); 8557 ins_encode(form3_opf_rs2F_rdF(tmp, dst)); 8558 ins_pipe(fcvtI2F); 8559 %} 8560 8561 instruct convI2F_stk(regF dst, stackSlotI src) %{ 8562 match(Set dst (ConvI2F src)); 8563 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8564 expand %{ 8565 regF tmp; 8566 stkI_to_regF(tmp,src); 8567 convI2F_helper(dst, tmp); 8568 %} 8569 %} 8570 8571 instruct convI2F_reg(regF dst, iRegI src) %{ 8572 predicate(UseVIS >= 3); 8573 match(Set dst (ConvI2F src)); 8574 ins_cost(DEFAULT_COST); 8575 expand %{ 8576 regF tmp; 8577 MoveI2F_reg_reg(tmp, src); 8578 convI2F_helper(dst, tmp); 8579 %} 8580 %} 8581 8582 instruct convI2F_mem( regF dst, memory mem ) %{ 8583 match(Set dst (ConvI2F (LoadI mem))); 8584 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8585 size(8); 8586 format %{ "LDF $mem,$dst\n\t" 8587 "FITOS $dst,$dst" %} 8588 opcode(Assembler::ldf_op3, Assembler::fitos_opf); 8589 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8590 ins_pipe(floadF_mem); 8591 %} 8592 8593 8594 instruct convI2L_reg(iRegL dst, iRegI src) %{ 8595 match(Set dst (ConvI2L src)); 8596 size(4); 8597 format %{ "SRA $src,0,$dst\t! int->long" %} 8598 opcode(Assembler::sra_op3, Assembler::arith_op); 8599 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8600 ins_pipe(ialu_reg_reg); 8601 %} 8602 8603 // Zero-extend convert int to long 8604 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{ 8605 match(Set dst (AndL (ConvI2L src) mask) ); 8606 size(4); 8607 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %} 8608 opcode(Assembler::srl_op3, Assembler::arith_op); 8609 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8610 ins_pipe(ialu_reg_reg); 8611 %} 8612 8613 // Zero-extend long 8614 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{ 8615 match(Set dst (AndL src mask) ); 8616 size(4); 8617 format %{ "SRL $src,0,$dst\t! 
zero-extend long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}


//-----------
// Long to Double conversion using V8 opcodes.
// Still useful because cheetah traps and becomes
// amazingly slow for some common numbers.

// Magic constant, 0x43300000
// High 32 bits of the IEEE-754 double 2^52.  Splicing a 32-bit value into
// the low word of this double puts it in the mantissa of 2^52, so that a
// later FSUBD of 2^52 recovers the value as a double (see the expansion in
// convL2D_reg_slow_fxtof below).
instruct loadConI_x43300000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %}
  ins_encode(SetHi22(0x43300000, dst));
  ins_pipe(ialu_none);
%}

// Magic constant, 0x41f00000
// High 32 bits of the IEEE-754 double 2^32 — the scale factor applied to
// the converted high word of the long in convL2D_reg_slow_fxtof.
instruct loadConI_x41f00000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %}
  ins_encode(SetHi22(0x41f00000, dst));
  ins_pipe(ialu_none);
%}

// Construct a double from two float halves
// Two single-precision moves: $dst.hi <- $src1.hi and $dst.lo <- $src2.lo.
instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(8);
  format %{ "FMOVS $src1.hi,$dst.hi\n\t"
            "FMOVS $src2.lo,$dst.lo" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf);
  ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Convert integer in high half of a double register (in the lower half of
// the double register file) to double
instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
  effect(DEF dst, USE src);
  size(4);
  format %{ "FITOD $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2D_rdD(src, dst));
  ins_pipe(fcvtLHi2D);
%}

// Add float double precision
instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FADDD $src1,$src2,$dst" %}
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Sub float double precision
instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FSUBD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Mul float double precision
instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FMULD $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(fmulD_reg_reg);
%}

// Slow-path long->double built only from V8 opcodes (no FXTOD), using the
// 2^52 / 2^32 magic constants loaded above:
//   dst = (double)hi_word * 2^32 + (double)(unsigned)lo_word
instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);

  expand %{
    regD_low tmpsrc;
    iRegI ix43300000;
    iRegI ix41f00000;
    stackSlotL lx43300000;
    stackSlotL lx41f00000;
    regD_low dx43300000;
    regD dx41f00000;
    regD tmp1;
    regD_low tmp2;
    regD tmp3;
    regD tmp4;

    // Move the long through its stack slot into an FP register pair.
    stkL_to_regD(tmpsrc, src);

    // Materialize the doubles 2^52 and 2^32 via integer SETHI + stack.
    loadConI_x43300000(ix43300000);
    loadConI_x41f00000(ix41f00000);
    regI_to_stkLHi(lx43300000, ix43300000);
    regI_to_stkLHi(lx41f00000, ix41f00000);
    stkL_to_regD(dx43300000, lx43300000);
    stkL_to_regD(dx41f00000, lx41f00000);

    convI2D_regDHi_regD(tmp1, tmpsrc);                // tmp1 = (double)hi word (signed, via FITOD)
    regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);  // tmp2 = 2^52 with lo word spliced into mantissa
    subD_regD_regD(tmp3, tmp2, dx43300000);           // tmp3 = (double)lo word (unsigned)
    mulD_regD_regD(tmp4, tmp1, dx41f00000);           // tmp4 = hi * 2^32
    addD_regD_regD(dst, tmp3, tmp4);                  // dst  = hi * 2^32 + lo
  %}
%}

// Long to Double conversion using fast fxtof
instruct convL2D_helper(regD dst, regD tmp) %{
effect(DEF dst, USE tmp); 8736 size(4); 8737 format %{ "FXTOD $tmp,$dst" %} 8738 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf); 8739 ins_encode(form3_opf_rs2D_rdD(tmp, dst)); 8740 ins_pipe(fcvtL2D); 8741 %} 8742 8743 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{ 8744 predicate(VM_Version::has_fast_fxtof()); 8745 match(Set dst (ConvL2D src)); 8746 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST); 8747 expand %{ 8748 regD tmp; 8749 stkL_to_regD(tmp, src); 8750 convL2D_helper(dst, tmp); 8751 %} 8752 %} 8753 8754 instruct convL2D_reg(regD dst, iRegL src) %{ 8755 predicate(UseVIS >= 3); 8756 match(Set dst (ConvL2D src)); 8757 expand %{ 8758 regD tmp; 8759 MoveL2D_reg_reg(tmp, src); 8760 convL2D_helper(dst, tmp); 8761 %} 8762 %} 8763 8764 // Long to Float conversion using fast fxtof 8765 instruct convL2F_helper(regF dst, regD tmp) %{ 8766 effect(DEF dst, USE tmp); 8767 size(4); 8768 format %{ "FXTOS $tmp,$dst" %} 8769 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf); 8770 ins_encode(form3_opf_rs2D_rdF(tmp, dst)); 8771 ins_pipe(fcvtL2F); 8772 %} 8773 8774 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{ 8775 match(Set dst (ConvL2F src)); 8776 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8777 expand %{ 8778 regD tmp; 8779 stkL_to_regD(tmp, src); 8780 convL2F_helper(dst, tmp); 8781 %} 8782 %} 8783 8784 instruct convL2F_reg(regF dst, iRegL src) %{ 8785 predicate(UseVIS >= 3); 8786 match(Set dst (ConvL2F src)); 8787 ins_cost(DEFAULT_COST); 8788 expand %{ 8789 regD tmp; 8790 MoveL2D_reg_reg(tmp, src); 8791 convL2F_helper(dst, tmp); 8792 %} 8793 %} 8794 8795 //----------- 8796 8797 instruct convL2I_reg(iRegI dst, iRegL src) %{ 8798 match(Set dst (ConvL2I src)); 8799 #ifndef _LP64 8800 format %{ "MOV $src.lo,$dst\t! long->int" %} 8801 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) ); 8802 ins_pipe(ialu_move_reg_I_to_L); 8803 #else 8804 size(4); 8805 format %{ "SRA $src,R_G0,$dst\t! 
long->int" %} 8806 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) ); 8807 ins_pipe(ialu_reg); 8808 #endif 8809 %} 8810 8811 // Register Shift Right Immediate 8812 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{ 8813 match(Set dst (ConvL2I (RShiftL src cnt))); 8814 8815 size(4); 8816 format %{ "SRAX $src,$cnt,$dst" %} 8817 opcode(Assembler::srax_op3, Assembler::arith_op); 8818 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) ); 8819 ins_pipe(ialu_reg_imm); 8820 %} 8821 8822 //----------Control Flow Instructions------------------------------------------ 8823 // Compare Instructions 8824 // Compare Integers 8825 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{ 8826 match(Set icc (CmpI op1 op2)); 8827 effect( DEF icc, USE op1, USE op2 ); 8828 8829 size(4); 8830 format %{ "CMP $op1,$op2" %} 8831 opcode(Assembler::subcc_op3, Assembler::arith_op); 8832 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8833 ins_pipe(ialu_cconly_reg_reg); 8834 %} 8835 8836 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{ 8837 match(Set icc (CmpU op1 op2)); 8838 8839 size(4); 8840 format %{ "CMP $op1,$op2\t! 
unsigned" %} 8841 opcode(Assembler::subcc_op3, Assembler::arith_op); 8842 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8843 ins_pipe(ialu_cconly_reg_reg); 8844 %} 8845 8846 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{ 8847 match(Set icc (CmpI op1 op2)); 8848 effect( DEF icc, USE op1 ); 8849 8850 size(4); 8851 format %{ "CMP $op1,$op2" %} 8852 opcode(Assembler::subcc_op3, Assembler::arith_op); 8853 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8854 ins_pipe(ialu_cconly_reg_imm); 8855 %} 8856 8857 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{ 8858 match(Set icc (CmpI (AndI op1 op2) zero)); 8859 8860 size(4); 8861 format %{ "BTST $op2,$op1" %} 8862 opcode(Assembler::andcc_op3, Assembler::arith_op); 8863 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8864 ins_pipe(ialu_cconly_reg_reg_zero); 8865 %} 8866 8867 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{ 8868 match(Set icc (CmpI (AndI op1 op2) zero)); 8869 8870 size(4); 8871 format %{ "BTST $op2,$op1" %} 8872 opcode(Assembler::andcc_op3, Assembler::arith_op); 8873 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8874 ins_pipe(ialu_cconly_reg_imm_zero); 8875 %} 8876 8877 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{ 8878 match(Set xcc (CmpL op1 op2)); 8879 effect( DEF xcc, USE op1, USE op2 ); 8880 8881 size(4); 8882 format %{ "CMP $op1,$op2\t\t! long" %} 8883 opcode(Assembler::subcc_op3, Assembler::arith_op); 8884 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8885 ins_pipe(ialu_cconly_reg_reg); 8886 %} 8887 8888 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{ 8889 match(Set xcc (CmpL op1 con)); 8890 effect( DEF xcc, USE op1, USE con ); 8891 8892 size(4); 8893 format %{ "CMP $op1,$con\t\t! 
long" %} 8894 opcode(Assembler::subcc_op3, Assembler::arith_op); 8895 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 8896 ins_pipe(ialu_cconly_reg_reg); 8897 %} 8898 8899 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ 8900 match(Set xcc (CmpL (AndL op1 op2) zero)); 8901 effect( DEF xcc, USE op1, USE op2 ); 8902 8903 size(4); 8904 format %{ "BTST $op1,$op2\t\t! long" %} 8905 opcode(Assembler::andcc_op3, Assembler::arith_op); 8906 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8907 ins_pipe(ialu_cconly_reg_reg); 8908 %} 8909 8910 // useful for checking the alignment of a pointer: 8911 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{ 8912 match(Set xcc (CmpL (AndL op1 con) zero)); 8913 effect( DEF xcc, USE op1, USE con ); 8914 8915 size(4); 8916 format %{ "BTST $op1,$con\t\t! long" %} 8917 opcode(Assembler::andcc_op3, Assembler::arith_op); 8918 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 8919 ins_pipe(ialu_cconly_reg_reg); 8920 %} 8921 8922 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{ 8923 match(Set icc (CmpU op1 op2)); 8924 8925 size(4); 8926 format %{ "CMP $op1,$op2\t! unsigned" %} 8927 opcode(Assembler::subcc_op3, Assembler::arith_op); 8928 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8929 ins_pipe(ialu_cconly_reg_imm); 8930 %} 8931 8932 // Compare Pointers 8933 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{ 8934 match(Set pcc (CmpP op1 op2)); 8935 8936 size(4); 8937 format %{ "CMP $op1,$op2\t! ptr" %} 8938 opcode(Assembler::subcc_op3, Assembler::arith_op); 8939 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8940 ins_pipe(ialu_cconly_reg_reg); 8941 %} 8942 8943 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{ 8944 match(Set pcc (CmpP op1 op2)); 8945 8946 size(4); 8947 format %{ "CMP $op1,$op2\t! 
ptr" %} 8948 opcode(Assembler::subcc_op3, Assembler::arith_op); 8949 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8950 ins_pipe(ialu_cconly_reg_imm); 8951 %} 8952 8953 // Compare Narrow oops 8954 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{ 8955 match(Set icc (CmpN op1 op2)); 8956 8957 size(4); 8958 format %{ "CMP $op1,$op2\t! compressed ptr" %} 8959 opcode(Assembler::subcc_op3, Assembler::arith_op); 8960 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8961 ins_pipe(ialu_cconly_reg_reg); 8962 %} 8963 8964 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{ 8965 match(Set icc (CmpN op1 op2)); 8966 8967 size(4); 8968 format %{ "CMP $op1,$op2\t! compressed ptr" %} 8969 opcode(Assembler::subcc_op3, Assembler::arith_op); 8970 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8971 ins_pipe(ialu_cconly_reg_imm); 8972 %} 8973 8974 //----------Max and Min-------------------------------------------------------- 8975 // Min Instructions 8976 // Conditional move for min 8977 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{ 8978 effect( USE_DEF op2, USE op1, USE icc ); 8979 8980 size(4); 8981 format %{ "MOVlt icc,$op1,$op2\t! min" %} 8982 opcode(Assembler::less); 8983 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 8984 ins_pipe(ialu_reg_flags); 8985 %} 8986 8987 // Min Register with Register. 8988 instruct minI_eReg(iRegI op1, iRegI op2) %{ 8989 match(Set op2 (MinI op1 op2)); 8990 ins_cost(DEFAULT_COST*2); 8991 expand %{ 8992 flagsReg icc; 8993 compI_iReg(icc,op1,op2); 8994 cmovI_reg_lt(op2,op1,icc); 8995 %} 8996 %} 8997 8998 // Max Instructions 8999 // Conditional move for max 9000 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9001 effect( USE_DEF op2, USE op1, USE icc ); 9002 format %{ "MOVgt icc,$op1,$op2\t! 
max" %} 9003 opcode(Assembler::greater); 9004 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9005 ins_pipe(ialu_reg_flags); 9006 %} 9007 9008 // Max Register with Register 9009 instruct maxI_eReg(iRegI op1, iRegI op2) %{ 9010 match(Set op2 (MaxI op1 op2)); 9011 ins_cost(DEFAULT_COST*2); 9012 expand %{ 9013 flagsReg icc; 9014 compI_iReg(icc,op1,op2); 9015 cmovI_reg_gt(op2,op1,icc); 9016 %} 9017 %} 9018 9019 9020 //----------Float Compares---------------------------------------------------- 9021 // Compare floating, generate condition code 9022 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{ 9023 match(Set fcc (CmpF src1 src2)); 9024 9025 size(4); 9026 format %{ "FCMPs $fcc,$src1,$src2" %} 9027 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); 9028 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) ); 9029 ins_pipe(faddF_fcc_reg_reg_zero); 9030 %} 9031 9032 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{ 9033 match(Set fcc (CmpD src1 src2)); 9034 9035 size(4); 9036 format %{ "FCMPd $fcc,$src1,$src2" %} 9037 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); 9038 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) ); 9039 ins_pipe(faddD_fcc_reg_reg_zero); 9040 %} 9041 9042 9043 // Compare floating, generate -1,0,1 9044 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{ 9045 match(Set dst (CmpF3 src1 src2)); 9046 effect(KILL fcc0); 9047 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9048 format %{ "fcmpl $dst,$src1,$src2" %} 9049 // Primary = float 9050 opcode( true ); 9051 ins_encode( floating_cmp( dst, src1, src2 ) ); 9052 ins_pipe( floating_cmp ); 9053 %} 9054 9055 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{ 9056 match(Set dst (CmpD3 src1 src2)); 9057 effect(KILL fcc0); 9058 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9059 format %{ "dcmpl $dst,$src1,$src2" %} 9060 // Primary = double (not float) 9061 opcode( false ); 9062 ins_encode( floating_cmp( dst, src1, src2 
) );
  ins_pipe( floating_cmp );
%}

//----------Branches---------------------------------------------------------
// Jump
// (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
// Computed jump through a table in the constant pool: loads the target
// address at [constant table base + constant offset + switch_val] into O7
// and jumps through it.  O7 is claimed as TEMP so the allocator keeps it
// free for the table/label address.
instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
  match(Jump switch_val);
  effect(TEMP table);

  ins_cost(350);

  format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
            "LD [O7 + $switch_val], O7\n\t"
            "JUMP O7" %}
  ins_encode %{
    // Calculate table address into a register.
    Register table_reg;
    Register label_reg = O7;
    // If we are calculating the size of this instruction don't trust
    // zero offsets because they might change when
    // MachConstantBaseNode decides to optimize the constant table
    // base.
    if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
      // Offset is zero and stable: index straight off the table base.
      table_reg = $constanttablebase;
    } else {
      table_reg = O7;
      RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
      __ add($constanttablebase, con_offset, table_reg);
    }

    // Jump to base address + switch value
    __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
    __ jmp(label_reg, G0);
    __ delayed()->nop();
  %}
  ins_pipe(ialu_reg_reg);
%}

// Direct Branch. Use V8 version with longer range.
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);

  size(8);  // BA plus delay-slot NOP
  ins_cost(BRANCH_COST);
  format %{ "BA $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    __ ba(*L);
    __ delayed()->nop();
  %}
  ins_pipe(br);
%}

// Direct Branch, short with no delay slot
instruct branch_short(label labl) %{
  match(Goto);
  predicate(UseCBCond);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "BA $labl\t!
short branch" %} 9127 ins_encode %{ 9128 Label* L = $labl$$label; 9129 assert(__ use_cbcond(*L), "back to back cbcond"); 9130 __ ba_short(*L); 9131 %} 9132 ins_short_branch(1); 9133 ins_avoid_back_to_back(1); 9134 ins_pipe(cbcond_reg_imm); 9135 %} 9136 9137 // Conditional Direct Branch 9138 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{ 9139 match(If cmp icc); 9140 effect(USE labl); 9141 9142 size(8); 9143 ins_cost(BRANCH_COST); 9144 format %{ "BP$cmp $icc,$labl" %} 9145 // Prim = bits 24-22, Secnd = bits 31-30 9146 ins_encode( enc_bp( labl, cmp, icc ) ); 9147 ins_pipe(br_cc); 9148 %} 9149 9150 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9151 match(If cmp icc); 9152 effect(USE labl); 9153 9154 ins_cost(BRANCH_COST); 9155 format %{ "BP$cmp $icc,$labl" %} 9156 // Prim = bits 24-22, Secnd = bits 31-30 9157 ins_encode( enc_bp( labl, cmp, icc ) ); 9158 ins_pipe(br_cc); 9159 %} 9160 9161 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{ 9162 match(If cmp pcc); 9163 effect(USE labl); 9164 9165 size(8); 9166 ins_cost(BRANCH_COST); 9167 format %{ "BP$cmp $pcc,$labl" %} 9168 ins_encode %{ 9169 Label* L = $labl$$label; 9170 Assembler::Predict predict_taken = 9171 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9172 9173 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9174 __ delayed()->nop(); 9175 %} 9176 ins_pipe(br_cc); 9177 %} 9178 9179 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{ 9180 match(If cmp fcc); 9181 effect(USE labl); 9182 9183 size(8); 9184 ins_cost(BRANCH_COST); 9185 format %{ "FBP$cmp $fcc,$labl" %} 9186 ins_encode %{ 9187 Label* L = $labl$$label; 9188 Assembler::Predict predict_taken = 9189 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9190 9191 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L); 9192 __ delayed()->nop(); 9193 %} 9194 ins_pipe(br_fcc); 9195 %} 9196 9197 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{ 9198 match(CountedLoopEnd cmp icc); 9199 effect(USE labl); 9200 9201 size(8); 9202 ins_cost(BRANCH_COST); 9203 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9204 // Prim = bits 24-22, Secnd = bits 31-30 9205 ins_encode( enc_bp( labl, cmp, icc ) ); 9206 ins_pipe(br_cc); 9207 %} 9208 9209 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9210 match(CountedLoopEnd cmp icc); 9211 effect(USE labl); 9212 9213 size(8); 9214 ins_cost(BRANCH_COST); 9215 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9216 // Prim = bits 24-22, Secnd = bits 31-30 9217 ins_encode( enc_bp( labl, cmp, icc ) ); 9218 ins_pipe(br_cc); 9219 %} 9220 9221 // Compare and branch instructions 9222 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9223 match(If cmp (CmpI op1 op2)); 9224 effect(USE labl, KILL icc); 9225 9226 size(12); 9227 ins_cost(BRANCH_COST); 9228 format %{ "CMP $op1,$op2\t! int\n\t" 9229 "BP$cmp $labl" %} 9230 ins_encode %{ 9231 Label* L = $labl$$label; 9232 Assembler::Predict predict_taken = 9233 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9234 __ cmp($op1$$Register, $op2$$Register); 9235 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9236 __ delayed()->nop(); 9237 %} 9238 ins_pipe(cmp_br_reg_reg); 9239 %} 9240 9241 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9242 match(If cmp (CmpI op1 op2)); 9243 effect(USE labl, KILL icc); 9244 9245 size(12); 9246 ins_cost(BRANCH_COST); 9247 format %{ "CMP $op1,$op2\t! int\n\t" 9248 "BP$cmp $labl" %} 9249 ins_encode %{ 9250 Label* L = $labl$$label; 9251 Assembler::Predict predict_taken = 9252 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9253 __ cmp($op1$$Register, $op2$$constant); 9254 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9255 __ delayed()->nop(); 9256 %} 9257 ins_pipe(cmp_br_reg_imm); 9258 %} 9259 9260 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9261 match(If cmp (CmpU op1 op2)); 9262 effect(USE labl, KILL icc); 9263 9264 size(12); 9265 ins_cost(BRANCH_COST); 9266 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9267 "BP$cmp $labl" %} 9268 ins_encode %{ 9269 Label* L = $labl$$label; 9270 Assembler::Predict predict_taken = 9271 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9272 __ cmp($op1$$Register, $op2$$Register); 9273 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9274 __ delayed()->nop(); 9275 %} 9276 ins_pipe(cmp_br_reg_reg); 9277 %} 9278 9279 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9280 match(If cmp (CmpU op1 op2)); 9281 effect(USE labl, KILL icc); 9282 9283 size(12); 9284 ins_cost(BRANCH_COST); 9285 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9286 "BP$cmp $labl" %} 9287 ins_encode %{ 9288 Label* L = $labl$$label; 9289 Assembler::Predict predict_taken = 9290 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9291 __ cmp($op1$$Register, $op2$$constant); 9292 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9293 __ delayed()->nop(); 9294 %} 9295 ins_pipe(cmp_br_reg_imm); 9296 %} 9297 9298 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9299 match(If cmp (CmpL op1 op2)); 9300 effect(USE labl, KILL xcc); 9301 9302 size(12); 9303 ins_cost(BRANCH_COST); 9304 format %{ "CMP $op1,$op2\t! long\n\t" 9305 "BP$cmp $labl" %} 9306 ins_encode %{ 9307 Label* L = $labl$$label; 9308 Assembler::Predict predict_taken = 9309 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9310 __ cmp($op1$$Register, $op2$$Register); 9311 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9312 __ delayed()->nop(); 9313 %} 9314 ins_pipe(cmp_br_reg_reg); 9315 %} 9316 9317 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9318 match(If cmp (CmpL op1 op2)); 9319 effect(USE labl, KILL xcc); 9320 9321 size(12); 9322 ins_cost(BRANCH_COST); 9323 format %{ "CMP $op1,$op2\t! long\n\t" 9324 "BP$cmp $labl" %} 9325 ins_encode %{ 9326 Label* L = $labl$$label; 9327 Assembler::Predict predict_taken = 9328 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9329 __ cmp($op1$$Register, $op2$$constant); 9330 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9331 __ delayed()->nop(); 9332 %} 9333 ins_pipe(cmp_br_reg_imm); 9334 %} 9335 9336 // Compare Pointers and branch 9337 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9338 match(If cmp (CmpP op1 op2)); 9339 effect(USE labl, KILL pcc); 9340 9341 size(12); 9342 ins_cost(BRANCH_COST); 9343 format %{ "CMP $op1,$op2\t! ptr\n\t" 9344 "B$cmp $labl" %} 9345 ins_encode %{ 9346 Label* L = $labl$$label; 9347 Assembler::Predict predict_taken = 9348 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9349 __ cmp($op1$$Register, $op2$$Register); 9350 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9351 __ delayed()->nop(); 9352 %} 9353 ins_pipe(cmp_br_reg_reg); 9354 %} 9355 9356 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9357 match(If cmp (CmpP op1 null)); 9358 effect(USE labl, KILL pcc); 9359 9360 size(12); 9361 ins_cost(BRANCH_COST); 9362 format %{ "CMP $op1,0\t! ptr\n\t" 9363 "B$cmp $labl" %} 9364 ins_encode %{ 9365 Label* L = $labl$$label; 9366 Assembler::Predict predict_taken = 9367 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9368 __ cmp($op1$$Register, G0); 9369 // bpr() is not used here since it has shorter distance. 9370 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9371 __ delayed()->nop(); 9372 %} 9373 ins_pipe(cmp_br_reg_reg); 9374 %} 9375 9376 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9377 match(If cmp (CmpN op1 op2)); 9378 effect(USE labl, KILL icc); 9379 9380 size(12); 9381 ins_cost(BRANCH_COST); 9382 format %{ "CMP $op1,$op2\t! compressed ptr\n\t" 9383 "BP$cmp $labl" %} 9384 ins_encode %{ 9385 Label* L = $labl$$label; 9386 Assembler::Predict predict_taken = 9387 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9388 __ cmp($op1$$Register, $op2$$Register); 9389 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9390 __ delayed()->nop(); 9391 %} 9392 ins_pipe(cmp_br_reg_reg); 9393 %} 9394 9395 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9396 match(If cmp (CmpN op1 null)); 9397 effect(USE labl, KILL icc); 9398 9399 size(12); 9400 ins_cost(BRANCH_COST); 9401 format %{ "CMP $op1,0\t! compressed ptr\n\t" 9402 "BP$cmp $labl" %} 9403 ins_encode %{ 9404 Label* L = $labl$$label; 9405 Assembler::Predict predict_taken = 9406 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9407 __ cmp($op1$$Register, G0); 9408 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9409 __ delayed()->nop(); 9410 %} 9411 ins_pipe(cmp_br_reg_reg); 9412 %} 9413 9414 // Loop back branch 9415 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9416 match(CountedLoopEnd cmp (CmpI op1 op2)); 9417 effect(USE labl, KILL icc); 9418 9419 size(12); 9420 ins_cost(BRANCH_COST); 9421 format %{ "CMP $op1,$op2\t! int\n\t" 9422 "BP$cmp $labl\t! 
Loop end" %} 9423 ins_encode %{ 9424 Label* L = $labl$$label; 9425 Assembler::Predict predict_taken = 9426 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9427 __ cmp($op1$$Register, $op2$$Register); 9428 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9429 __ delayed()->nop(); 9430 %} 9431 ins_pipe(cmp_br_reg_reg); 9432 %} 9433 9434 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9435 match(CountedLoopEnd cmp (CmpI op1 op2)); 9436 effect(USE labl, KILL icc); 9437 9438 size(12); 9439 ins_cost(BRANCH_COST); 9440 format %{ "CMP $op1,$op2\t! int\n\t" 9441 "BP$cmp $labl\t! Loop end" %} 9442 ins_encode %{ 9443 Label* L = $labl$$label; 9444 Assembler::Predict predict_taken = 9445 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9446 __ cmp($op1$$Register, $op2$$constant); 9447 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9448 __ delayed()->nop(); 9449 %} 9450 ins_pipe(cmp_br_reg_imm); 9451 %} 9452 9453 // Short compare and branch instructions 9454 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9455 match(If cmp (CmpI op1 op2)); 9456 predicate(UseCBCond); 9457 effect(USE labl, KILL icc); 9458 9459 size(4); 9460 ins_cost(BRANCH_COST); 9461 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %} 9462 ins_encode %{ 9463 Label* L = $labl$$label; 9464 assert(__ use_cbcond(*L), "back to back cbcond"); 9465 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9466 %} 9467 ins_short_branch(1); 9468 ins_avoid_back_to_back(1); 9469 ins_pipe(cbcond_reg_reg); 9470 %} 9471 9472 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9473 match(If cmp (CmpI op1 op2)); 9474 predicate(UseCBCond); 9475 effect(USE labl, KILL icc); 9476 9477 size(4); 9478 ins_cost(BRANCH_COST); 9479 format %{ "CWB$cmp $op1,$op2,$labl\t! 
int" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Compare unsigned ints (reg-reg) and branch in a single CBCOND instruction.
instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Compare unsigned int against a 5-bit immediate and branch (CBCOND).
instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Compare longs (reg-reg, xcc) and branch in a single CBCOND instruction.
instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL xcc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Compare long against a 5-bit immediate and branch (CBCOND, xcc).
instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL xcc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Compare Pointers and branch
instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL pcc);

  size(4);
  ins_cost(BRANCH_COST);
#ifdef _LP64
  format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
#else
  format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
#endif
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Compare a pointer against NULL (G0) and branch (CBCOND).
instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 null));
  predicate(UseCBCond);
  effect(USE labl, KILL pcc);

  size(4);
  ins_cost(BRANCH_COST);
#ifdef _LP64
  format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
#else
  format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
#endif
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Compare compressed pointers (reg-reg) and branch (CBCOND).
instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  // Fixed format string: was "$op1,op2" (missing '$'), so the second operand
  // was printed literally as "op2" instead of being substituted in disassembly.
  format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Compare a compressed pointer against NULL (G0) and branch (CBCOND).
instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 null));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Loop back branch
instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Counted-loop back branch against a 5-bit immediate (CBCOND).
instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Branch-on-register tests all 64 bits.  We assume that values
// in 64-bit registers always remain zero or sign extended
// unless our code munges the high bits.  Interrupts can chop
// the high order bits to zero or sign at any time.
// Branch on int register compared against zero, no condition codes used.
instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
  match(If cmp (CmpI op1 zero));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}

// Branch on pointer register compared against NULL, no condition codes used.
instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
  match(If cmp (CmpP op1 null));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}

// Branch on long register compared against zero, no condition codes used.
instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
  match(If cmp (CmpL op1 zero));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}


// ============================================================================
// Long Compare
//
// Currently we hold longs in 2 registers.  Comparing such values efficiently
// is tricky.  The flavor of compare used depends on whether we are testing
// for LT, LE, or EQ.  For a simple LT test we can check just the sign bit.
// The GE test is the negated LT test.  The LE test can be had by commuting
// the operands (yielding a GE test) and then negating; negate again for the
// GT test.  The EQ test is done by ORcc'ing the high and low halves, and the
// NE test is negated from that.

// Due to a shortcoming in the ADLC, it mixes up expressions like:
// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)).  Note the
// difference between 'Y' and '0L'.  The tree-matches for the CmpI sections
// are collapsed internally in the ADLC's dfa-gen code.  The match for
// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
// foo match ends up with the wrong leaf.  One fix is to not match both
// reg-reg and reg-zero forms of long-compare.  This is unfortunate because
// both forms beat the trinary form of long-compare and both are very useful
// on Intel which has so few registers.

// Branch on previously-set long condition codes (xcc), with a delay-slot NOP.
instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $xcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    // Predict taken for a backward branch (loop), not-taken otherwise.
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(br_cc);
%}

// Manifest a CmpL3 result in an integer register.  Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
  match(Set dst (CmpL3 src1 src2) );
  effect( KILL ccr );
  ins_cost(6*DEFAULT_COST);
  size(24);
  format %{ "CMP $src1,$src2\t\t! long\n"
            "\tBLT,a,pn done\n"
            "\tMOV -1,$dst\t! delay slot\n"
            "\tBGT,a,pn done\n"
            "\tMOV 1,$dst\t! delay slot\n"
            "\tCLR $dst\n"
            "done:" %}
  ins_encode( cmpl_flag(src1,src2,dst) );
  ins_pipe(cmpL_reg);
%}

// Conditional move
// Conditionally move a long register based on long condition codes (xcc).
instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditionally move zero into a long register (xcc).
instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

// Conditionally move an int register based on long condition codes (xcc).
instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditionally move an 11-bit immediate into an int register (xcc).
instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

// Conditionally move a compressed-pointer register (xcc).
instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditionally move a pointer register (xcc).
instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditionally move NULL into a pointer register (xcc).
instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

// Conditionally move a single-precision float register (xcc).
instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVS$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

// Conditionally move a double-precision float register (xcc).
instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x102);
  format %{ "FMOVD$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

// ============================================================================
// Safepoint Instruction
// Poll the safepoint page: the load traps when the page is protected.
instruct safePoint_poll(iRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  size(4);
#ifdef _LP64
  format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
#else
  format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
#endif
  ins_encode %{
    __ relocate(relocInfo::poll_type);
    __ ld_ptr($poll$$Register, 0, G0);
  %}
  ins_pipe(loadPollP);
%}

// ============================================================================
// Call Instructions
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
  match(CallStaticJava);
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  size(8);
  ins_cost(CALL_COST);
  format %{ "CALL,static ; NOP ==> " %}
  ins_encode( Java_Static_Call( meth ), call_epilog );
  ins_pipe(simple_call);
%}

// Call Java Static Instruction (method handle version)
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth, KILL l7_mh_SP_save);

  size(16);
  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle" %}
  // Preserve/restore SP around the call for method-handle invokes.
  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
  ins_pipe(simple_call);
%}

// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "SET (empty),R_G5\n\t"
            "CALL,dynamic ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth, l7RegP l7) %{
  match(CallRuntime);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog, adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallRuntime
instruct CallLeafDirect(method meth, l7RegP l7) %{
  match(CallLeaf);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallLeaf
instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
  match(CallLeafNoFP);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
  ins_encode(form_jmpl(jump_target));
  ins_pipe(tail_call);
%}


// Return Instruction
instruct Ret() %{
  match(Return);

  // The epilogue node did the ret already.
  size(0);
  format %{ "! return" %}
  ins_encode();
  ins_pipe(empty);
%}


// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "! discard R_O7\n\t"
            "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
  ins_encode(form_jmpl_set_exception_pc(jump_target));
  // opcode(Assembler::jmpl_op3, Assembler::arith_op);
  // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
  // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
  ins_pipe(tail_call);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( o0RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in R_O0; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}


// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "Jmp rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_pipe(tail_call);
%}


// Die now
// Emit an illegal trap; reached only on paths the compiler proved impossible.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ILLTRAP ; ShouldNotReachHere" %}
  ins_encode( form2_illtrap() );
  ins_pipe(tail_call);
%}

// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// not zero for a miss or zero for a hit. The encoding ALSO sets flags.
// Out-of-line slow path of a subtype check; result returned in $index.
instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
  match(Set index (PartialSubtypeCheck sub super));
  effect( KILL pcc, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_pipe(partial_subtype_check_pipe);
%}

// Same slow path, but the consumer only needs the condition codes it sets.
instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
  match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
  effect( KILL idx, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_pipe(partial_subtype_check_pipe);
%}


// ============================================================================
// inlined locking and unlocking

// Fast-path monitor enter; result communicated through condition codes.
instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastLock object box));

  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Lock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}


// Fast-path monitor exit; result communicated through condition codes.
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}

// The encodings are generic.
// Zero an array with a simple store loop (non-BIS path).
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
  predicate(!use_block_zeroing(n->in(2)) );
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, KILL ccr);
  ins_cost(300);
  format %{ "MOV $cnt,$temp\n"
            "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
            " BRge loop\t\t! Clearing loop\n"
            " STX G0,[$base+$temp]\t! delay slot" %}

  ins_encode %{
    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
    Register nof_bytes_arg = $cnt$$Register;
    Register nof_bytes_tmp = $temp$$Register;
    Register base_pointer_arg = $base$$Register;

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
    // %%%% this mini-loop must not cross a cache boundary!
  %}
  ins_pipe(long_memory_op);
%}

// Zero an array with Block Init Store (BIS) instructions.
instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing (temp is not used).
    __ bis_zeroing(to, count, G0, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// BIS zeroing variant needing a temp when BlockZeroingLowLimit is not simm13.
instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;
    Register temp  = $tmp$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// Intrinsic for String.compareTo.
instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
  ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
  ins_pipe(long_memory_op);
%}

// Intrinsic for String.equals.
instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                       o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
  ins_encode( enc_String_Equals(str1, str2, cnt, result) );
  ins_pipe(long_memory_op);
%}

// Intrinsic for Arrays.equals on char arrays.
instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                      o7RegI tmp2, flagsReg ccr) %{
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
  ins_pipe(long_memory_op);
%}


//---------- Zeros Count Instructions ------------------------------------------

// Count leading zeros of an int via smear-right then POPC.
instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosI src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // return (WORDBITS - popc(x));
  format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
            "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 32,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srl(Rsrc, 1,  Rtmp);
    __ srl(Rsrc, 0,  Rdst);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 2,  Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 4,  Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 8,  Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 16, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerInt, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// Count leading zeros of a long via smear-right then POPC.
instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // x |= (x >> 32);
  // return (WORDBITS - popc(x));
  format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
            "OR $src,$tmp,$dst\n\t"
            "SRLX $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,32,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 64,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srlx(Rsrc, 1,  Rtmp);
    __ or3( Rsrc, Rtmp, Rdst);
    __ srlx(Rdst, 2,  Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 4,  Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 8,  Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 16, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 32, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerLong, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// Count trailing zeros of an int: popc(~x & (x - 1)).
instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "SRL $dst,R_G0,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ srl(Rdst, G0, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// Count trailing zeros of a long: popc(~x & (x - 1)).
instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}


//---------- Population Count Instructions -------------------------------------

// Integer.bitCount via the hardware POPC instruction.
instruct popCountI(iRegIsafe dst, iRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
            "POPC $dst, $dst" %}
  ins_encode %{
    __ srl($src$$Register, G0, $dst$$Register);
    __ popc($dst$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
// Long.bitCount via the hardware POPC instruction (result is an int).
instruct popCountL(iRegIsafe dst, iRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "POPC $src, $dst" %}
  ins_encode %{
    __ popc($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}


// ============================================================================
//------------Bytes reverse--------------------------------------------------

// Byte-swap an int by reloading its stack-spilled copy with a little-endian ASI.
instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

// Byte-swap a long by reloading its stack-spilled copy with a little-endian ASI.
instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
  match(Set dst (ReverseBytesL src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

// Byte-swap an unsigned short via a little-endian ASI reload from the stack.
instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesUS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
10402 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10403 format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %} 10404 10405 ins_encode %{ 10406 // the value was spilled as an int so bias the load 10407 __ set($src$$disp + STACK_BIAS + 2, O7); 10408 __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10409 %} 10410 ins_pipe( iload_mem ); 10411 %} 10412 10413 instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{ 10414 match(Set dst (ReverseBytesS src)); 10415 10416 // Op cost is artificially doubled to make sure that load or store 10417 // instructions are preferred over this one which requires a spill 10418 // onto a stack slot. 10419 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10420 format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %} 10421 10422 ins_encode %{ 10423 // the value was spilled as an int so bias the load 10424 __ set($src$$disp + STACK_BIAS + 2, O7); 10425 __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10426 %} 10427 ins_pipe( iload_mem ); 10428 %} 10429 10430 // Load Integer reversed byte order 10431 instruct loadI_reversed(iRegI dst, indIndexMemory src) %{ 10432 match(Set dst (ReverseBytesI (LoadI src))); 10433 10434 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 10435 size(4); 10436 format %{ "LDUWA $src, $dst\t!asi=primary_little" %} 10437 10438 ins_encode %{ 10439 __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10440 %} 10441 ins_pipe(iload_mem); 10442 %} 10443 10444 // Load Long - aligned and reversed 10445 instruct loadL_reversed(iRegL dst, indIndexMemory src) %{ 10446 match(Set dst (ReverseBytesL (LoadL src))); 10447 10448 ins_cost(MEMORY_REF_COST); 10449 size(4); 10450 format %{ "LDXA $src, $dst\t!asi=primary_little" %} 10451 10452 ins_encode %{ 10453 __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10454 %} 10455 ins_pipe(iload_mem); 10456 %} 10457 10458 // Load 
// Load unsigned short / char reversed byte order
// (byte swap fused into the memory load via ASI_PRIMARY_LITTLE)
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesUS (LoadUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load short reversed byte order
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesS (LoadS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Store Integer reversed byte order
// (byte swap fused into the store via the little-endian ASI)
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreI dst (ReverseBytesI src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store Long reversed byte order
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
  match(Set dst (StoreL dst (ReverseBytesL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store unsigned short/char reversed byte order
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store short reversed byte order
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// ====================VECTOR INSTRUCTIONS=====================================

// Load Aligned Packed values into a Double Register
// (an 8-byte vector lives in a double FP register on SPARC)
instruct loadV8(regD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
  ins_encode %{
    __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(floadD_mem);
%}

// Store Vector in Double register to memory
instruct storeV8(memory mem, regD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$mem\t! store vector (8 bytes)" %}
  ins_encode %{
    __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_reg);
%}

// Store Zero into vector in memory
// (all-zero replications collapse to a single 8-byte store of G0)
instruct storeV8B_zero(memory mem, immI0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateB zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

instruct storeV4S_zero(memory mem, immI0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateS zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

instruct storeV2I_zero(memory mem, immI0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateI zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

instruct storeV2F_zero(memory mem, immF0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateF zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (2 floats)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

// Replicate scalar to packed byte values into Double register
// Builds the 8-byte replication in an integer register by shift/or doubling
// (1 byte -> 2 -> 4 -> 8), then moves it to an FP register with MOVXTOD
// (VIS3 only, hence the UseVIS >= 3 predicate).
instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 8 && UseVIS >= 3);
  match(Set dst (ReplicateB src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,56,$tmp\n\t"
            "SRLX $tmp, 8,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
            "MOVXTOD $tmp,$dst\t! MoveL2D" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 56, Rtmp);
    __ srlx(Rtmp,  8, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed byte values into Double stack
// Same shift/or doubling as Repl8B_reg, but without VIS3 the value must go
// through memory: store the long to a stack slot (the allocator reloads it
// into the FP register).
instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 8 && UseVIS < 3);
  match(Set dst (ReplicateB src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,56,$tmp\n\t"
            "SRLX $tmp, 8,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
            "STX $tmp,$dst\t! regL to stkD" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 56, Rtmp);
    __ srlx(Rtmp,  8, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ set ($dst$$disp + STACK_BIAS, Rtmp2);
    __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar constant to packed byte values in Double register
// The replicated pattern is materialized in the constant table and loaded
// with LDDF; tmp may be clobbered forming the table offset.
instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed char/short values into Double register
// (2-byte element: only two shift/or doubling steps are needed; VIS3 MOVXTOD)
instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 4 && UseVIS >= 3);
  match(Set dst (ReplicateS src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,48,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
            "MOVXTOD $tmp,$dst\t! MoveL2D" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 48, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed char/short values into Double stack
// (pre-VIS3 variant: result goes through a stack slot)
instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 4 && UseVIS < 3);
  match(Set dst (ReplicateS src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,48,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
            "STX $tmp,$dst\t! regL to stkD" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 48, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ set ($dst$$disp + STACK_BIAS, Rtmp2);
    __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar constant to packed char/short values in Double register
instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed int values into Double register
// (4-byte element: a single shift/or doubling step; VIS3 MOVXTOD)
instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 2 && UseVIS >= 3);
  match(Set dst (ReplicateI src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,32,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
            "MOVXTOD $tmp,$dst\t! MoveL2D" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 32, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed int values into Double stack
// (pre-VIS3 variant: result goes through a stack slot)
instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 2 && UseVIS < 3);
  match(Set dst (ReplicateI src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,32,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
            "STX $tmp,$dst\t! regL to stkD" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 32, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ set ($dst$$disp + STACK_BIAS, Rtmp2);
    __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar constant to packed int values in Double register
// (operand is immI, i.e. any int constant — the earlier "zero constant"
// wording was a stale copy-paste)
instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed float values into Double stack
// (the same float is stored to both halves of the stack double slot)
instruct Repl2F_stk(stackSlotD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(MEMORY_REF_COST*2);
  format %{ "STF $src,$dst.hi\t! packed2F\n\t"
            "STF $src,$dst.lo" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

// Replicate scalar constant to packed float values in Double register
// (operand is immF, i.e. any float constant — the earlier "zero constant"
// wording was a stale copy-paste)
instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint %{
// (instruction_number.operand_name relational_op instruction_number.operand_name
//  [, ...]
// );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser. An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == EAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(eRegI dst, eRegI src) %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_eReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( 
incI_eReg_immI1( 0.dst 1.src 0.src ) ); 10897 // %} 10898 // 10899 10900 // // Change load of spilled value to only a spill 10901 // instruct storeI(memory mem, eRegI src) %{ 10902 // match(Set mem (StoreI mem src)); 10903 // %} 10904 // 10905 // instruct loadI(eRegI dst, memory mem) %{ 10906 // match(Set dst (LoadI mem)); 10907 // %} 10908 // 10909 // peephole %{ 10910 // peepmatch ( loadI storeI ); 10911 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); 10912 // peepreplace ( storeI( 1.mem 1.mem 1.src ) ); 10913 // %} 10914 10915 //----------SMARTSPILL RULES--------------------------------------------------- 10916 // These must follow all instruction definitions as they use the names 10917 // defined in the instructions definitions. 10918 // 10919 // SPARC will probably not have any of these rules due to RISC instruction set. 10920 10921 //----------PIPELINE----------------------------------------------------------- 10922 // Rules which define the behavior of the target architectures pipeline.