//
// Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// SPARC Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
41 // 42 // SOC = Save-On-Call: The register allocator assumes that these registers 43 // can be used without saving upon entry to the method, 44 // but that they must be saved at call sites. 45 // 46 // SOE = Save-On-Entry: The register allocator assumes that these registers 47 // must be saved before using them upon entry to the 48 // method, but they do not need to be saved at call 49 // sites. 50 // 51 // AS = Always-Save: The register allocator assumes that these registers 52 // must be saved before using them upon entry to the 53 // method, & that they must be saved at call sites. 54 // 55 // Ideal Register Type is used to determine how to save & restore a 56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get 57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. 58 // 59 // The encoding number is the actual bit-pattern placed into the opcodes. 60 61 62 // ---------------------------- 63 // Integer/Long Registers 64 // ---------------------------- 65 66 // Need to expose the hi/lo aspect of 64-bit registers 67 // This register set is used for both the 64-bit build and 68 // the 32-bit build with 1-register longs. 
69 70 // Global Registers 0-7 71 reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next()); 72 reg_def R_G0 ( NS, NS, Op_RegI, 0, G0->as_VMReg()); 73 reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next()); 74 reg_def R_G1 (SOC, SOC, Op_RegI, 1, G1->as_VMReg()); 75 reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next()); 76 reg_def R_G2 ( NS, NS, Op_RegI, 2, G2->as_VMReg()); 77 reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next()); 78 reg_def R_G3 (SOC, SOC, Op_RegI, 3, G3->as_VMReg()); 79 reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next()); 80 reg_def R_G4 (SOC, SOC, Op_RegI, 4, G4->as_VMReg()); 81 reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next()); 82 reg_def R_G5 (SOC, SOC, Op_RegI, 5, G5->as_VMReg()); 83 reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next()); 84 reg_def R_G6 ( NS, NS, Op_RegI, 6, G6->as_VMReg()); 85 reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next()); 86 reg_def R_G7 ( NS, NS, Op_RegI, 7, G7->as_VMReg()); 87 88 // Output Registers 0-7 89 reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next()); 90 reg_def R_O0 (SOC, SOC, Op_RegI, 8, O0->as_VMReg()); 91 reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next()); 92 reg_def R_O1 (SOC, SOC, Op_RegI, 9, O1->as_VMReg()); 93 reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next()); 94 reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg()); 95 reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next()); 96 reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg()); 97 reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next()); 98 reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg()); 99 reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next()); 100 reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg()); 101 reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next()); 102 reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg()); 103 reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next()); 104 reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg()); 
105 106 // Local Registers 0-7 107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next()); 108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg()); 109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next()); 110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg()); 111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next()); 112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg()); 113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next()); 114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg()); 115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next()); 116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg()); 117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next()); 118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg()); 119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next()); 120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg()); 121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next()); 122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg()); 123 124 // Input Registers 0-7 125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next()); 126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg()); 127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next()); 128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg()); 129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next()); 130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg()); 131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next()); 132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg()); 133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next()); 134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg()); 135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next()); 136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg()); 137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next()); 138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg()); 139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next()); 140 reg_def R_I7 ( NS, NS, Op_RegI, 31, 
I7->as_VMReg()); 141 142 // ---------------------------- 143 // Float/Double Registers 144 // ---------------------------- 145 146 // Float Registers 147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()); 148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()); 149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()); 150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()); 151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()); 152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()); 153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()); 154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()); 155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()); 156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()); 157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg()); 158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg()); 159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg()); 160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg()); 161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg()); 162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg()); 163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg()); 164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg()); 165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg()); 166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg()); 167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg()); 168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg()); 169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg()); 170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg()); 171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg()); 172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg()); 173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg()); 174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg()); 175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg()); 176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg()); 177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg()); 178 
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg()); 179 180 // Double Registers 181 // The rules of ADL require that double registers be defined in pairs. 182 // Each pair must be two 32-bit values, but not necessarily a pair of 183 // single float registers. In each pair, ADLC-assigned register numbers 184 // must be adjacent, with the lower number even. Finally, when the 185 // CPU stores such a register pair to memory, the word associated with 186 // the lower ADLC-assigned number must be stored to the lower address. 187 188 // These definitions specify the actual bit encodings of the sparc 189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp 190 // wants 0-63, so we have to convert every time we want to use fp regs 191 // with the macroassembler, using reg_to_DoubleFloatRegister_object(). 192 // 255 is a flag meaning "don't go here". 193 // I believe we can't handle callee-save doubles D32 and up until 194 // the place in the sparc stack crawler that asserts on the 255 is 195 // fixed up. 
196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg()); 197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next()); 198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg()); 199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next()); 200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg()); 201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next()); 202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg()); 203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next()); 204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg()); 205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next()); 206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg()); 207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next()); 208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg()); 209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next()); 210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg()); 211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next()); 212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg()); 213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next()); 214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg()); 215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next()); 216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg()); 217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next()); 218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg()); 219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next()); 220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg()); 221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next()); 222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg()); 223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next()); 224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg()); 225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next()); 226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg()); 227 reg_def R_D62x(SOC, 
SOC, Op_RegD,255, F62->as_VMReg()->next()); 228 229 230 // ---------------------------- 231 // Special Registers 232 // Condition Codes Flag Registers 233 // I tried to break out ICC and XCC but it's not very pretty. 234 // Every Sparc instruction which defs/kills one also kills the other. 235 // Hence every compare instruction which defs one kind of flags ends 236 // up needing a kill of the other. 237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 238 239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad()); 241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad()); 242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad()); 243 244 // ---------------------------- 245 // Specify the enum values for the registers. These enums are only used by the 246 // OptoReg "class". We can convert these enum values at will to VMReg when needed 247 // for visibility to the rest of the vm. The order of this enum influences the 248 // register allocator so having the freedom to set this order and not be stuck 249 // with the order that is natural for the rest of the vm is worth it. 250 alloc_class chunk0( 251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H, 252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H, 253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H, 254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H); 255 256 // Note that a register is not allocatable unless it is also mentioned 257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg. 258 259 alloc_class chunk1( 260 // The first registers listed here are those most likely to be used 261 // as temporaries. 
We move F0..F7 away from the front of the list, 262 // to reduce the likelihood of interferences with parameters and 263 // return values. Likewise, we avoid using F0/F1 for parameters, 264 // since they are used for return values. 265 // This FPU fine-tuning is worth about 1% on the SPEC geomean. 266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23, 268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31, 269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values 270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x, 271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x, 273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x); 274 275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3); 276 277 //----------Architecture Description Register Classes-------------------------- 278 // Several register classes are automatically defined based upon information in 279 // this architecture description. 280 // 1) reg_class inline_cache_reg ( as defined in frame section ) 281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section ) 282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) 283 // 284 285 // G0 is not included in integer class since it has special meaning. 286 reg_class g0_reg(R_G0); 287 288 // ---------------------------- 289 // Integer Register Classes 290 // ---------------------------- 291 // Exclusions from i_reg: 292 // R_G0: hardwired zero 293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java) 294 // R_G6: reserved by Solaris ABI to tools 295 // R_G7: reserved by Solaris ABI to libthread 296 // R_O7: Used as a temp in many encodings 297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 298 299 // Class for all integer registers, except the G registers. 
This is used for 300 // encodings which use G registers as temps. The regular inputs to such 301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator 302 // will not put an input into a temp register. 303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 304 305 reg_class g1_regI(R_G1); 306 reg_class g3_regI(R_G3); 307 reg_class g4_regI(R_G4); 308 reg_class o0_regI(R_O0); 309 reg_class o7_regI(R_O7); 310 311 // ---------------------------- 312 // Pointer Register Classes 313 // ---------------------------- 314 #ifdef _LP64 315 // 64-bit build means 64-bit pointers means hi/lo pairs 316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 320 // Lock encodings use G3 and G4 internally 321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5, 322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 325 // Special class for storeP instructions, which can store SP or RPC to TLS. 326 // It is also used for memory addressing, allowing direct TLS addressing. 
327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP, 329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP ); 331 // R_L7 is the lowest-priority callee-save (i.e., NS) register 332 // We use it to save R_G2 across calls out of Java. 333 reg_class l7_regP(R_L7H,R_L7); 334 335 // Other special pointer regs 336 reg_class g1_regP(R_G1H,R_G1); 337 reg_class g2_regP(R_G2H,R_G2); 338 reg_class g3_regP(R_G3H,R_G3); 339 reg_class g4_regP(R_G4H,R_G4); 340 reg_class g5_regP(R_G5H,R_G5); 341 reg_class i0_regP(R_I0H,R_I0); 342 reg_class o0_regP(R_O0H,R_O0); 343 reg_class o1_regP(R_O1H,R_O1); 344 reg_class o2_regP(R_O2H,R_O2); 345 reg_class o7_regP(R_O7H,R_O7); 346 347 #else // _LP64 348 // 32-bit build means 32-bit pointers means 1 register. 349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5, 350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 353 // Lock encodings use G3 and G4 internally 354 reg_class lock_ptr_reg(R_G1, R_G5, 355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 358 // Special class for storeP instructions, which can store SP or RPC to TLS. 359 // It is also used for memory addressing, allowing direct TLS addressing. 360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5, 361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP, 362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP); 364 // R_L7 is the lowest-priority callee-save (i.e., NS) register 365 // We use it to save R_G2 across calls out of Java. 
366 reg_class l7_regP(R_L7); 367 368 // Other special pointer regs 369 reg_class g1_regP(R_G1); 370 reg_class g2_regP(R_G2); 371 reg_class g3_regP(R_G3); 372 reg_class g4_regP(R_G4); 373 reg_class g5_regP(R_G5); 374 reg_class i0_regP(R_I0); 375 reg_class o0_regP(R_O0); 376 reg_class o1_regP(R_O1); 377 reg_class o2_regP(R_O2); 378 reg_class o7_regP(R_O7); 379 #endif // _LP64 380 381 382 // ---------------------------- 383 // Long Register Classes 384 // ---------------------------- 385 // Longs in 1 register. Aligned adjacent hi/lo pairs. 386 // Note: O7 is never in this class; it is sometimes used as an encoding temp. 387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5 388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5 389 #ifdef _LP64 390 // 64-bit, longs in 1 register: use all 64-bit integer registers 391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's. 392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7 393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 394 #endif // _LP64 395 ); 396 397 reg_class g1_regL(R_G1H,R_G1); 398 reg_class g3_regL(R_G3H,R_G3); 399 reg_class o2_regL(R_O2H,R_O2); 400 reg_class o7_regL(R_O7H,R_O7); 401 402 // ---------------------------- 403 // Special Class for Condition Code Flags Register 404 reg_class int_flags(CCR); 405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3); 406 reg_class float_flag0(FCC0); 407 408 409 // ---------------------------- 410 // Float Point Register Classes 411 // ---------------------------- 412 // Skip F30/F31, they are reserved for mem-mem copies 413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 414 415 // Paired floating point registers--they show up in the same order as the floats, 416 // but they are used with the "Op_RegD" 
type, and always occur in even/odd pairs. 417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29, 419 /* Use extra V9 double registers; this AD file does not support V8 */ 420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x 422 ); 423 424 // Paired floating point registers--they show up in the same order as the floats, 425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs. 426 // This class is usable for mis-aligned loads as happen in I2C adapters. 427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 429 %} 430 431 //----------DEFINITION BLOCK--------------------------------------------------- 432 // Define name --> value mappings to inform the ADLC of an integer valued name 433 // Current support includes integer values in the range [0, 0x7FFFFFFF] 434 // Format: 435 // int_def <name> ( <int_value>, <expression>); 436 // Generated Code in ad_<arch>.hpp 437 // #define <name> (<expression>) 438 // // value == <int_value> 439 // Generated code in ad_<arch>.cpp adlc_verification() 440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>"); 441 // 442 definitions %{ 443 // The default cost (of an ALU instruction). 444 int_def DEFAULT_COST ( 100, 100); 445 int_def HUGE_COST (1000000, 1000000); 446 447 // Memory refs are twice as expensive as run-of-the-mill. 448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2); 449 450 // Branches are even more expensive. 
451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3); 452 int_def CALL_COST ( 300, DEFAULT_COST * 3); 453 %} 454 455 456 //----------SOURCE BLOCK------------------------------------------------------- 457 // This is a block of C++ code which provides values, functions, and 458 // definitions necessary in the rest of the architecture description 459 source_hpp %{ 460 // Must be visible to the DFA in dfa_sparc.cpp 461 extern bool can_branch_register( Node *bol, Node *cmp ); 462 463 extern bool use_block_zeroing(Node* count); 464 465 // Macros to extract hi & lo halves from a long pair. 466 // G0 is not part of any long pair, so assert on that. 467 // Prevents accidentally using G1 instead of G0. 468 #define LONG_HI_REG(x) (x) 469 #define LONG_LO_REG(x) (x) 470 471 %} 472 473 source %{ 474 #define __ _masm. 475 476 // tertiary op of a LoadP or StoreP encoding 477 #define REGP_OP true 478 479 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding); 480 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding); 481 static Register reg_to_register_object(int register_encoding); 482 483 // Used by the DFA in dfa_sparc.cpp. 484 // Check for being able to use a V9 branch-on-register. Requires a 485 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign- 486 // extended. Doesn't work following an integer ADD, for example, because of 487 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On 488 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and 489 // replace them with zero, which could become sign-extension in a different OS 490 // release. There's no obvious reason why an interrupt will ever fill these 491 // bits with non-zero junk (the registers are reloaded with standard LD 492 // instructions which either zero-fill or sign-fill). 
493 bool can_branch_register( Node *bol, Node *cmp ) { 494 if( !BranchOnRegister ) return false; 495 #ifdef _LP64 496 if( cmp->Opcode() == Op_CmpP ) 497 return true; // No problems with pointer compares 498 #endif 499 if( cmp->Opcode() == Op_CmpL ) 500 return true; // No problems with long compares 501 502 if( !SparcV9RegsHiBitsZero ) return false; 503 if( bol->as_Bool()->_test._test != BoolTest::ne && 504 bol->as_Bool()->_test._test != BoolTest::eq ) 505 return false; 506 507 // Check for comparing against a 'safe' value. Any operation which 508 // clears out the high word is safe. Thus, loads and certain shifts 509 // are safe, as are non-negative constants. Any operation which 510 // preserves zero bits in the high word is safe as long as each of its 511 // inputs are safe. Thus, phis and bitwise booleans are safe if their 512 // inputs are safe. At present, the only important case to recognize 513 // seems to be loads. Constants should fold away, and shifts & 514 // logicals can use the 'cc' forms. 515 Node *x = cmp->in(1); 516 if( x->is_Load() ) return true; 517 if( x->is_Phi() ) { 518 for( uint i = 1; i < x->req(); i++ ) 519 if( !x->in(i)->is_Load() ) 520 return false; 521 return true; 522 } 523 return false; 524 } 525 526 bool use_block_zeroing(Node* count) { 527 // Use BIS for zeroing if count is not constant 528 // or it is >= BlockZeroingLowLimit. 529 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit); 530 } 531 532 // **************************************************************************** 533 534 // REQUIRED FUNCTIONALITY 535 536 // !!!!! Special hack to get all type of calls to specify the byte offset 537 // from the start of the call to the point where the return address 538 // will point. 539 // The "return address" is the address of the call instruction, plus 8. 
540 541 int MachCallStaticJavaNode::ret_addr_offset() { 542 int offset = NativeCall::instruction_size; // call; delay slot 543 if (_method_handle_invoke) 544 offset += 4; // restore SP 545 return offset; 546 } 547 548 int MachCallDynamicJavaNode::ret_addr_offset() { 549 int vtable_index = this->_vtable_index; 550 if (vtable_index < 0) { 551 // must be invalid_vtable_index, not nonvirtual_vtable_index 552 assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value"); 553 return (NativeMovConstReg::instruction_size + 554 NativeCall::instruction_size); // sethi; setlo; call; delay slot 555 } else { 556 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 557 int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 558 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 559 int klass_load_size; 560 if (UseCompressedClassPointers) { 561 assert(Universe::heap() != NULL, "java heap should be initialized"); 562 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; 563 } else { 564 klass_load_size = 1*BytesPerInstWord; 565 } 566 if (Assembler::is_simm13(v_off)) { 567 return klass_load_size + 568 (2*BytesPerInstWord + // ld_ptr, ld_ptr 569 NativeCall::instruction_size); // call; delay slot 570 } else { 571 return klass_load_size + 572 (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr 573 NativeCall::instruction_size); // call; delay slot 574 } 575 } 576 } 577 578 int MachCallRuntimeNode::ret_addr_offset() { 579 #ifdef _LP64 580 if (MacroAssembler::is_far_target(entry_point())) { 581 return NativeFarCall::instruction_size; 582 } else { 583 return NativeCall::instruction_size; 584 } 585 #else 586 return NativeCall::instruction_size; // call; delay slot 587 #endif 588 } 589 590 // Indicate if the safepoint node needs the polling page as an input. 591 // Since Sparc does not have absolute addressing, it does. 
// Safepoints on SPARC take the polling-page address as an explicit input.
bool SafePointNode::needs_polling_address_input() {
  return true;
}

// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint_trap();
}

#ifndef PRODUCT
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif

void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

// Traceable jump
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();
}

// Traceable jump and set exception pc
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  // Record the issuing pc in the delay slot (O7 holds the call return address).
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}

void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}

void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}


// VerifyOops helper: recover the constant offset of a memory operand from the
// node's base+displacement decomposition and cross-check it against disp32.
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}


// VerifyOops helper: alternate computation of the same offset, walking the
// address input (in(2)) and folding in an AddP's constant offset if present.
// Used as a cross-check against get_offset_from_base.
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  if (offset != Type::OffsetBot) offset += disp32;
  return offset;
}

static inline jdouble replicate_immI(int con, int count, int width) {
  // Load a constant replicated "count" times with width "width"
  assert(count*width == 8 && width <= 4, "sanity");
  int bit_width = width * 8;
  jlong val = con;
  val &= (((jlong) 1) << bit_width) - 1;  // mask off sign bits
  for (int i = 0; i < count - 1; i++) {
    val |= (val << bit_width);
  }
  // NOTE(review): pointer type-punning jlong->jdouble violates strict
  // aliasing; relied upon here to reinterpret the bit pattern unchanged.
  jdouble dval = *((jdouble*) &val);  // coerce to double type
  return dval;
}

static inline jdouble replicate_immF(float con) {
  // Replicate float con 2 times and pack into vector.
  // NOTE(review): same pointer type-punning caveat as replicate_immI.
  int val = *((int*)&con);
  jlong lval = val;
  lval = (lval << 32) | (lval & 0xFFFFFFFFl);
  jdouble dval = *((jdouble*) &lval);  // coerce to double type
  return dval;
}

// Standard Sparc opcode form2 field breakdown
static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
  f0 &= (1<<19)-1;     // Mask displacement to 19 bits
  int op = (f30 << 30) |
           (f29 << 29) |
           (f25 << 25) |
           (f22 << 22) |
           (f20 << 20) |
           (f19 << 19) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form2 field breakdown
static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
  f0 >>= 10;           // Drop 10 bits
  f0 &= (1<<22)-1;     // Mask displacement to 22 bits
  int op = (f30 << 30) |
           (f25 << 25) |
           (f22 << 22) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form3 field breakdown
static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
  int op = (f30 << 30) |
           (f25 << 25) |
           (f19 << 19) |
           (f14 << 14) |
           (f5  <<  5) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form3 field breakdown (immediate-mode variant)
static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
  simm13 &= (1<<13)-1;  // Mask to 13 bits
  int op = (f30 << 30) |
           (f25 << 25) |
           (f19 << 19) |
           (f14 << 14) |
           (1   << 13) |  // bit to indicate immediate-mode
           (simm13<<0);
  cbuf.insts()->emit_int32(op);
}

static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
  simm10 &= (1<<10)-1;  // Mask to 10 bits
  emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
}

#ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif


// Emit a form3 memory instruction (load/store, reg+reg or reg+simm13
// addressing), optionally instrumented for +VerifyOops in debug builds.
void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used together with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      if (st_op == Op_StoreI)      st_op = Op_StoreP;
      else if (ld_op == Op_LoadI)  ld_op = Op_LoadP;
      else                         ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI       && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF       && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange   && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass   && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL       && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchRead  && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector  && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI  && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF  && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL  && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD  && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr();  // %%% oopptr?
        if (atype != NULL) {
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // re-run both for easy debugger inspection when they disagree
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                         || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc        << 25)
        | (primary        << 19)
        | (src1_enc       << 14);

  uint index = src2_enc;
  int disp = disp32;

  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
    disp += STACK_BIAS;

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000;            // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  {
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}

void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  if (preserve_g2)   __ delayed()->mov(G2, L7);
  else  __ delayed()->nop();

  if (preserve_g2)   __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note:  [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}

//=============================================================================
// REQUIRED FUNCTIONALITY for encoding
void emit_lo(CodeBuffer &cbuf, int val) {  }
void emit_hi(CodeBuffer &cbuf, int val) {  }


//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();

int Compile::ConstantTable::calculate_table_base_offset() const {
  if (UseRDPCForConstantTableBase) {
    // The table base offset might be less but then it fits into
    // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
    return Assembler::min_simm13();
  } else {
    int offset = -(size() / 2);
    if (!Assembler::is_simm13(offset)) {
      offset = Assembler::min_simm13();
    }
    return offset;
  }
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section.  This
    // assert checks for that.  The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                         \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}

uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
  if (UseRDPCForConstantTableBase) {
    // This is really the worst case but generally it's only 1 instruction.
    return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
  } else {
    return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
  }
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC   %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif


//=============================================================================

#ifndef PRODUCT
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  // NOTE(review): framesize is size_t but printed with %d below — fine for
  // realistic frame sizes, but technically a format/type mismatch.
  size_t framesize = C->frame_slots() << LogBytesPerInt;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    st->print_cr("! stack bang"); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print   ("SAVE   R_SP,-%d,R_SP",framesize);
  } else {
    st->print_cr("SETHI  R_SP,hi%%(-%d),R_G3",framesize); st->print("\t");
    st->print_cr("ADD    R_G3,lo%%(-%d),R_G3",framesize); st->print("\t");
    st->print   ("SAVE   R_SP,R_G3,R_SP");
  }

}
#endif

void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_slots() << LogBytesPerInt;
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachPrologNode::reloc() const {
  return 10; // a large enough number
}

//=============================================================================
#ifndef PRODUCT
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if( do_polling() && ra_->C->is_method_compilation() ) {
    st->print("SETHI  #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
    st->print("LDX    [L0],G0\t!Poll for Safepointing\n\t");
#else
    st->print("LDUW   [L0],G0\t!Poll for Safepointing\n\t");
#endif
  }

  if( do_polling() )
    st->print("RET\n\t");

  st->print("RESTORE");
}
#endif

void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  // If this does safepoint polling, then do it here
  if( do_polling() && ra_->C->is_method_compilation() ) {
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, L0);
    __ relocate(relocInfo::poll_return_type);
    __ ld_ptr( L0, 0, G0 );
  }

  // If this is a return, then stuff the restore in the delay slot
  if( do_polling() ) {
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}

//=============================================================================

// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };
static enum RC rc_class( OptoReg::Name reg ) {
  if( !OptoReg::is_valid(reg)  ) return rc_bad;
  if (OptoReg::is_stack(reg)) return rc_stack;
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}

// Spill helper: emit (or format) one load/store between a register and a
// stack slot; returns the accumulated size in bytes.
static int impl_helper( const MachNode *mach, CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) {
    // Better yet would be some mechanism to handle variable-size matches correctly
    if (!Assembler::is_simm13(offset + STACK_BIAS)) {
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
    } else {
      emit_form3_mem_reg(*cbuf, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
    }
  }
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    if( is_load ) st->print("%s   [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else          st->print("%s   R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}

// Spill helper: emit (or format) one reg-reg move (form3 arith op); returns
// the accumulated size in bytes.
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    st->print("%s  R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
  }
#endif
  return size+4;
}

uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
                                        PhaseRegAlloc *ra_,
                                        bool do_size,
                                        outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return size;            // Self copy, no move

  // --------------------------------------
  // Check for mem-mem move.  Load into unused float registers and fall into
  // the float-store case.
1338 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) { 1339 int offset = ra_->reg2offset(src_first); 1340 // Further check for aligned-adjacent pair, so we can use a double load 1341 if( (src_first&1)==0 && src_first+1 == src_second ) { 1342 src_second = OptoReg::Name(R_F31_num); 1343 src_second_rc = rc_float; 1344 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st); 1345 } else { 1346 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st); 1347 } 1348 src_first = OptoReg::Name(R_F30_num); 1349 src_first_rc = rc_float; 1350 } 1351 1352 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { 1353 int offset = ra_->reg2offset(src_second); 1354 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st); 1355 src_second = OptoReg::Name(R_F31_num); 1356 src_second_rc = rc_float; 1357 } 1358 1359 // -------------------------------------- 1360 // Check for float->int copy; requires a trip through memory 1361 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) { 1362 int offset = frame::register_save_words*wordSize; 1363 if (cbuf) { 1364 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 ); 1365 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1366 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1367 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 ); 1368 } 1369 #ifndef PRODUCT 1370 else if (!do_size) { 1371 if (size != 0) st->print("\n\t"); 1372 st->print( "SUB R_SP,16,R_SP\n"); 1373 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1374 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1375 st->print("\tADD R_SP,16,R_SP\n"); 1376 } 1377 #endif 1378 size += 
16; 1379 } 1380 1381 // Check for float->int copy on T4 1382 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) { 1383 // Further check for aligned-adjacent pair, so we can use a double move 1384 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1385 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st); 1386 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st); 1387 } 1388 // Check for int->float copy on T4 1389 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) { 1390 // Further check for aligned-adjacent pair, so we can use a double move 1391 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1392 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st); 1393 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st); 1394 } 1395 1396 // -------------------------------------- 1397 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations. 1398 // In such cases, I have to do the big-endian swap. For aligned targets, the 1399 // hardware does the flop for me. Doubles are always aligned, so no problem 1400 // there. Misaligned sources only come from native-long-returns (handled 1401 // special below). 1402 #ifndef _LP64 1403 if( src_first_rc == rc_int && // source is already big-endian 1404 src_second_rc != rc_bad && // 64-bit move 1405 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst 1406 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" ); 1407 // Do the big-endian flop. 
1408 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ; 1409 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc; 1410 } 1411 #endif 1412 1413 // -------------------------------------- 1414 // Check for integer reg-reg copy 1415 if( src_first_rc == rc_int && dst_first_rc == rc_int ) { 1416 #ifndef _LP64 1417 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case 1418 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1419 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1420 // operand contains the least significant word of the 64-bit value and vice versa. 1421 OptoReg::Name tmp = OptoReg::Name(R_O7_num); 1422 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" ); 1423 // Shift O0 left in-place, zero-extend O1, then OR them into the dst 1424 if( cbuf ) { 1425 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 ); 1426 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 ); 1427 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] ); 1428 #ifndef PRODUCT 1429 } else if( !do_size ) { 1430 if( size != 0 ) st->print("\n\t"); 1431 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp)); 1432 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second)); 1433 st->print("OR R_%s,R_%s,R_%s\t! 
spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first)); 1434 #endif 1435 } 1436 return size+12; 1437 } 1438 else if( dst_first == R_I0_num && dst_second == R_I1_num ) { 1439 // returning a long value in I0/I1 1440 // a SpillCopy must be able to target a return instruction's reg_class 1441 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1442 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1443 // operand contains the least significant word of the 64-bit value and vice versa. 1444 OptoReg::Name tdest = dst_first; 1445 1446 if (src_first == dst_first) { 1447 tdest = OptoReg::Name(R_O7_num); 1448 size += 4; 1449 } 1450 1451 if( cbuf ) { 1452 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg"); 1453 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1 1454 // ShrL_reg_imm6 1455 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 ); 1456 // ShrR_reg_imm6 src, 0, dst 1457 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 ); 1458 if (tdest != dst_first) { 1459 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] ); 1460 } 1461 } 1462 #ifndef PRODUCT 1463 else if( !do_size ) { 1464 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!! 1465 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest)); 1466 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second)); 1467 if (tdest != dst_first) { 1468 st->print("MOV R_%s,R_%s\t! 
spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first)); 1469 } 1470 } 1471 #endif // PRODUCT 1472 return size+8; 1473 } 1474 #endif // !_LP64 1475 // Else normal reg-reg copy 1476 assert( src_second != dst_first, "smashed second before evacuating it" ); 1477 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st); 1478 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" ); 1479 // This moves an aligned adjacent pair. 1480 // See if we are done. 1481 if( src_first+1 == src_second && dst_first+1 == dst_second ) 1482 return size; 1483 } 1484 1485 // Check for integer store 1486 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) { 1487 int offset = ra_->reg2offset(dst_first); 1488 // Further check for aligned-adjacent pair, so we can use a double store 1489 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1490 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st); 1491 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st); 1492 } 1493 1494 // Check for integer load 1495 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) { 1496 int offset = ra_->reg2offset(src_first); 1497 // Further check for aligned-adjacent pair, so we can use a double load 1498 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1499 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st); 1500 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1501 } 1502 1503 // Check for float reg-reg copy 1504 if( src_first_rc == rc_float && dst_first_rc == rc_float ) { 1505 // Further check for aligned-adjacent pair, so we can use a double move 1506 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && 
dst_first+1 == dst_second ) 1507 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st); 1508 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st); 1509 } 1510 1511 // Check for float store 1512 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) { 1513 int offset = ra_->reg2offset(dst_first); 1514 // Further check for aligned-adjacent pair, so we can use a double store 1515 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1516 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st); 1517 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1518 } 1519 1520 // Check for float load 1521 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) { 1522 int offset = ra_->reg2offset(src_first); 1523 // Further check for aligned-adjacent pair, so we can use a double load 1524 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1525 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st); 1526 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st); 1527 } 1528 1529 // -------------------------------------------------------------------- 1530 // Check for hi bits still needing moving. Only happens for misaligned 1531 // arguments to native calls. 1532 if( src_second == dst_second ) 1533 return size; // Self copy; no move 1534 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" ); 1535 1536 #ifndef _LP64 1537 // In the LP64 build, all registers can be moved as aligned/adjacent 1538 // pairs, so there's never any need to move the high bits separately. 
1539 // The 32-bit builds have to deal with the 32-bit ABI which can force 1540 // all sorts of silly alignment problems. 1541 1542 // Check for integer reg-reg copy. Hi bits are stuck up in the top 1543 // 32-bits of a 64-bit register, but are needed in low bits of another 1544 // register (else it's a hi-bits-to-hi-bits copy which should have 1545 // happened already as part of a 64-bit move) 1546 if( src_second_rc == rc_int && dst_second_rc == rc_int ) { 1547 assert( (src_second&1)==1, "its the evil O0/O1 native return case" ); 1548 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" ); 1549 // Shift src_second down to dst_second's low bits. 1550 if( cbuf ) { 1551 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1552 #ifndef PRODUCT 1553 } else if( !do_size ) { 1554 if( size != 0 ) st->print("\n\t"); 1555 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second)); 1556 #endif 1557 } 1558 return size+4; 1559 } 1560 1561 // Check for high word integer store. Must down-shift the hi bits 1562 // into a temp register, then fall into the case of storing int bits. 1563 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) { 1564 // Shift src_second down to dst_second's low bits. 1565 if( cbuf ) { 1566 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1567 #ifndef PRODUCT 1568 } else if( !do_size ) { 1569 if( size != 0 ) st->print("\n\t"); 1570 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num)); 1571 #endif 1572 } 1573 size+=4; 1574 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num! 
    }

    // ---- 32-bit-only high-word fragments ----
    // The remaining cases move just the upper half of a misaligned 64-bit
    // value between a register and a stack slot.

    // Check for high word integer load
    if( dst_second_rc == rc_int && src_second_rc == rc_stack )
      return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st);

    // Check for high word integer store
    if( src_second_rc == rc_int && dst_second_rc == rc_stack )
      return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st);

    // Check for high word float store
    if( src_second_rc == rc_float && dst_second_rc == rc_stack )
      return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st);

#endif // !_LP64

  // No strategy matched the requested register-class pair: this spill copy
  // cannot be encoded.
  Unimplemented();
}

#ifndef PRODUCT
// Debug printing: run implementation() in "format" mode (no CodeBuffer,
// st != NULL) so the same dispatch logic produces the textual listing.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill copy into the code buffer (cbuf != NULL mode).
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size in bytes of the spill copy: implementation() in do_size mode emits
// nothing and just accumulates the byte count.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}

//=============================================================================
#ifndef PRODUCT
void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif

// Emit _count NOP instructions (each 4 bytes on SPARC).
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
  MacroAssembler _masm(&cbuf);
  for(int i = 0; i < _count; i += 1) {
    __ nop();
  }
}

uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}


//=============================================================================
#ifndef PRODUCT
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
}
#endif

// Materialize the address of the lock's stack slot (SP + offset + STACK_BIAS)
// into the destination register.  When the biased offset does not fit in a
// 13-bit immediate, O7 is used as a scratch register to build it.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm13(offset)) {
    __ add(SP, offset, reg_to_register_object(reg));
  } else {
    __ set(offset, O7);
    __ add(SP, O7, reg_to_register_object(reg));
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  // Measure by emitting into the compile-local scratch buffer.
  return ra_->C->scratch_emit_size(this);
}

//=============================================================================
#ifndef PRODUCT
// Print the Unverified Entry Point (inline-cache check) sequence.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else  // _LP64
  st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif  // _LP64
}
#endif

// Emit the inline-cache check: load the receiver's klass, compare it with
// the expected klass in the inline-cache register, and trap on mismatch.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}


//=============================================================================

// Bytes reserved for the exception handler stub.
uint size_exception_handler() {
  if (TraceJumps) {
    return (400); // just a guess
  }
  return ( NativeJump::instruction_size ); // sethi;jmp;nop
}

// Bytes reserved for the deopt handler stub.
uint size_deopt_handler() {
  if (TraceJumps) {
    return (400); // just a guess
  }
  return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
}

// Emit exception handler code.
1722 int emit_exception_handler(CodeBuffer& cbuf) { 1723 Register temp_reg = G3; 1724 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point()); 1725 MacroAssembler _masm(&cbuf); 1726 1727 address base = 1728 __ start_a_stub(size_exception_handler()); 1729 if (base == NULL) return 0; // CodeBuffer::expand failed 1730 1731 int offset = __ offset(); 1732 1733 __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp 1734 __ delayed()->nop(); 1735 1736 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 1737 1738 __ end_a_stub(); 1739 1740 return offset; 1741 } 1742 1743 int emit_deopt_handler(CodeBuffer& cbuf) { 1744 // Can't use any of the current frame's registers as we may have deopted 1745 // at a poll and everything (including G3) can be live. 1746 Register temp_reg = L0; 1747 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); 1748 MacroAssembler _masm(&cbuf); 1749 1750 address base = 1751 __ start_a_stub(size_deopt_handler()); 1752 if (base == NULL) return 0; // CodeBuffer::expand failed 1753 1754 int offset = __ offset(); 1755 __ save_frame(0); 1756 __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp 1757 __ delayed()->restore(); 1758 1759 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); 1760 1761 __ end_a_stub(); 1762 return offset; 1763 1764 } 1765 1766 // Given a register encoding, produce a Integer Register object 1767 static Register reg_to_register_object(int register_encoding) { 1768 assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding"); 1769 return as_Register(register_encoding); 1770 } 1771 1772 // Given a register encoding, produce a single-precision Float Register object 1773 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) { 1774 assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding"); 1775 return as_SingleFloatRegister(register_encoding); 1776 } 1777 1778 // 
Given a register encoding, produce a double-precision Float Register object 1779 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) { 1780 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding"); 1781 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding"); 1782 return as_DoubleFloatRegister(register_encoding); 1783 } 1784 1785 const bool Matcher::match_rule_supported(int opcode) { 1786 if (!has_match_rule(opcode)) 1787 return false; 1788 1789 switch (opcode) { 1790 case Op_CountLeadingZerosI: 1791 case Op_CountLeadingZerosL: 1792 case Op_CountTrailingZerosI: 1793 case Op_CountTrailingZerosL: 1794 case Op_PopCountI: 1795 case Op_PopCountL: 1796 if (!UsePopCountInstruction) 1797 return false; 1798 case Op_CompareAndSwapL: 1799 #ifdef _LP64 1800 case Op_CompareAndSwapP: 1801 #endif 1802 if (!VM_Version::supports_cx8()) 1803 return false; 1804 break; 1805 } 1806 1807 return true; // Per default match rules are supported. 1808 } 1809 1810 int Matcher::regnum_to_fpu_offset(int regnum) { 1811 return regnum - 32; // The FP registers are in the second chunk 1812 } 1813 1814 #ifdef ASSERT 1815 address last_rethrow = NULL; // debugging aid for Rethrow encoding 1816 #endif 1817 1818 // Vector width in bytes 1819 const int Matcher::vector_width_in_bytes(BasicType bt) { 1820 assert(MaxVectorSize == 8, ""); 1821 return 8; 1822 } 1823 1824 // Vector ideal reg 1825 const int Matcher::vector_ideal_reg(int size) { 1826 assert(MaxVectorSize == 8, ""); 1827 return Op_RegD; 1828 } 1829 1830 const int Matcher::vector_shift_count_ideal_reg(int size) { 1831 fatal("vector shift is not supported"); 1832 return Node::NotAMachineReg; 1833 } 1834 1835 // Limits on vector size (number of elements) loaded into vector. 
// Maximum number of elements of the given type that fit in a vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt);  // Same as max.
}

// SPARC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// USII supports fxtof through the whole range of number, USIII doesn't
const bool Matcher::convL2FSupported(void) {
  return VM_Version::has_fast_fxtof();
}

// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Don't need to adjust the offset.
  // Short branches use CBCOND, whose displacement is a signed 12-bit value.
  return UseCBCond && Assembler::is_simm12(offset);
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Depends on optimizations in MacroAssembler::setx.
  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);   // low 32 bits via truncation (& ~0 is a no-op)
  return (hi == 0) || (hi == -1) || (lo == 0);
}

// No scaling for the parameter the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on T4 and on SPARC64.
const int Matcher::float_cmove_cost() {
  return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
}

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif

// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif

// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}

bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than a code which use multiply.
  return VM_Version::has_fast_idiv();
}

// Register for DIVI projection of divmodI.
// SPARC has no fused div/mod node: this projection is never requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI (never requested on SPARC).
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL (never requested on SPARC).
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL (never requested on SPARC).
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask();
}

// Math-exact intrinsics place their result in G1 and flags in the
// integer condition codes.
const RegMask Matcher::mathExactI_result_proj_mask() {
  return G1_REGI_mask();
}

const RegMask Matcher::mathExactL_result_proj_mask() {
  return G1_REGL_mask();
}

const RegMask Matcher::mathExactI_flags_proj_mask() {
  return INT_FLAGS_mask();
}


%}


// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
#ifdef _LP64
#define immX      immL
#define immX13    immL13
#define immX13m7  immL13m7
#define iRegX     iRegL
#define g1RegX    g1RegL
#else
#define immX      immI
#define immX13    immI13
#define immX13m7  immI13m7
#define iRegX     iRegI
#define g1RegX    g1RegI
#endif

//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams.  Encoding classes are parameterized macros used by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction.  Operands specify their base encoding interface with the
// interface keyword.  There are currently supported four interfaces,
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
// operand to generate a function which returns its register number when
// queried.  CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried.  MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried.  COND_INTER causes an operand to generate six functions which
// return the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
//
// Instructions specify two basic values for encoding.  Again, a function
// is available to check if the constant displacement is an oop. They use the
// ins_encode keyword to specify their encoding classes (which must be
// a sequence of enc_class names, and their parameters, specified in
// the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode.  Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
  // Debug-only marker for instructions whose encoding has not been exercised.
  enc_class enc_untested %{
#ifdef ASSERT
    MacroAssembler _masm(&cbuf);
    __ untested("encoding");
#endif
  %}

  // Memory op with primary and tertiary opcodes.
  enc_class form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, this, $primary, $tertiary,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  // Memory op with only a primary opcode (tertiary = -1).
  enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  enc_class form3_mem_prefetch_read( memory mem ) %{
    emit_form3_mem_reg(cbuf, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
  %}

  enc_class form3_mem_prefetch_write( memory mem ) %{
    emit_form3_mem_reg(cbuf, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
  %}

  // Load a misaligned 64-bit value as two 32-bit loads (low word via O7),
  // then marshal them into a single 64-bit register: reg = (reg << 32) | O7.
  enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
    emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
    emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
  %}

  // Access a misaligned double as two adjacent single-precision halves.
  enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    // Load long with 2 instructions
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 );
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
  %}

  //%%% form3_mem_plus_4_reg is a hack--get rid of it
  enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
    guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
  %}

  enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( $rs2$$reg != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Target lo half of long
  enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( $rs2$$reg != LONG_LO_REG($rd$$reg) )
      emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Source lo half of long
  enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( LONG_LO_REG($rs2$$reg) != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) );
  %}

  // Target hi half of long: replicate the sign of rs1 (SRA by 31).
  enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 );
  %}

  // Source lo half of long, and leave it sign extended.
  enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{
    // Sign extend low half
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 );
  %}

  // Source hi half of long, and leave it sign extended.
  // NOTE(review): the following enc_class uses SRLX (zero-fill); "sign
  // extended" here looks stale -- confirm against callers.
  enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
    // Shift high half to low half
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
  %}

  // Source hi half of long
  enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
  %}

  // Generic three-register arithmetic form (opcode from secondary/primary).
  enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
  %}

  // dst = (src != 0) ? 1 : 0, via SUBcc + ADDC on the carry flag.
  enc_class enc_to_bool( iRegI src, iRegI dst ) %{
    emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 );
  %}

  // dst = (p < q) ? -1 : 0, using an annulled branch with a MOV in the
  // delay slot.
  enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
    emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
    // clear if nothing else is happening
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
    // blt,a,pn done
    emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
    // mov dst,-1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
  %}

  // Shift by a 5-bit immediate (32-bit shifts).
  enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
  %}

  // Shift by a 6-bit immediate (64-bit shifts; 0x1000 sets the X bit).
  enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
  %}

  // 64-bit shift by register (0x80 selects the extended-shift form).
  enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
  %}

  enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
  %}

  // O1 = O7 + pc_return_offset (materialize the return PC).
  enc_class move_return_pc_to_o1() %{
    emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
  %}

#ifdef _LP64
  /* %%% merge with enc_to_bool */
  // dst = (src != NULL) ? 1 : dst, via MOVR on register-not-zero.
  enc_class enc_convP2B( iRegI dst, iRegP src ) %{
    MacroAssembler _masm(&cbuf);

    Register   src_reg = reg_to_register_object($src$$reg);
    Register   dst_reg = reg_to_register_object($dst$$reg);
    __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
  %}
#endif

  enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
    // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
    MacroAssembler _masm(&cbuf);

    Register   p_reg = reg_to_register_object($p$$reg);
    Register   q_reg = reg_to_register_object($q$$reg);
    Register   y_reg = reg_to_register_object($y$$reg);
    Register tmp_reg = reg_to_register_object($tmp$$reg);

    __ subcc( p_reg, q_reg,   p_reg );
    __ add  ( p_reg, y_reg, tmp_reg );
    __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );
  %}

  // double -> int with NaN handling: a NaN input produces 0.
  enc_class form_d2i_helper(regD src, regF dst) %{
    // fcmp %fcc0,$src,$src
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fdtoi $src,$dst
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
    // fitos $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
    // carry on here...
  %}

  // double -> long with NaN handling: a NaN input produces 0.
  enc_class form_d2l_helper(regD src, regD dst) %{
    // fcmp %fcc0,$src,$src  check for NAN
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fdtox $src,$dst  convert in delay slot
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
    // fxtod $dst,$dst  (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
    // carry on here...
  %}

  // float -> int with NaN handling: a NaN input produces 0.
  enc_class form_f2i_helper(regF src, regF dst) %{
    // fcmps %fcc0,$src,$src
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fstoi $src,$dst
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
    // fitos $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
    // carry on here...
  %}

  // float -> long with NaN handling: a NaN input produces 0.
  enc_class form_f2l_helper(regF src, regD dst) %{
    // fcmps %fcc0,$src,$src
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fstox $src,$dst
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
    // fxtod $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
    // carry on here...
  %}

  // Single-source FP forms (opcode bits from primary/secondary/tertiary).
  enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}

  // +1 addresses the odd (low) half of a double register pair.
  enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}

  enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
  enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}

  // Two-source FP forms.
  enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
  %}

  enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2,
regD rd ) %{ 2313 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2314 %} 2315 2316 enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{ 2317 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2318 %} 2319 2320 enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{ 2321 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2322 %} 2323 2324 enc_class form3_convI2F(regF rs2, regF rd) %{ 2325 emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg); 2326 %} 2327 2328 // Encloding class for traceable jumps 2329 enc_class form_jmpl(g3RegP dest) %{ 2330 emit_jmpl(cbuf, $dest$$reg); 2331 %} 2332 2333 enc_class form_jmpl_set_exception_pc(g1RegP dest) %{ 2334 emit_jmpl_set_exception_pc(cbuf, $dest$$reg); 2335 %} 2336 2337 enc_class form2_nop() %{ 2338 emit_nop(cbuf); 2339 %} 2340 2341 enc_class form2_illtrap() %{ 2342 emit_illtrap(cbuf); 2343 %} 2344 2345 2346 // Compare longs and convert into -1, 0, 1. 
  // dst = sign(src1 - src2) as -1/0/1. The subcc sets xcc; the two annulled
  // branches (displacements 5 and 3 instructions) each set dst in their delay
  // slot only when taken; the fall-through CLR handles the equal case.
  enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
    // CMP $src1,$src2
    emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
    // blt,a,pn done
    emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less   , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
    // mov dst,-1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
    // bgt,a,pn done
    emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
    // mov dst,1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
    // CLR $dst
    emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
  %}

  // Call the partial-subtype-check stub; nop fills the call's delay slot.
  enc_class enc_PartialSubtypeCheck() %{
    MacroAssembler _masm(&cbuf);
    __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
    __ delayed()->nop();
  %}

  // Conditional branch on icc; backward branches are predicted taken,
  // forward branches predicted not-taken.
  enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
    MacroAssembler _masm(&cbuf);
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}

  // Branch on register contents (BPr), same direction-based prediction.
  enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
    MacroAssembler _masm(&cbuf);
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
    __ delayed()->nop();
  %}

  // Hand-assembled MOVcc (register form) on icc/xcc; $pcc supplies cc1:cc0.
  enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($cmp$$cmpcode << 14) |
             (0 << 13) |                    // select register move
             ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc' or 'xcc'
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // Hand-assembled MOVcc (simm11 immediate form) on icc/xcc.
  enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
    int simm11 = $src$$constant & ((1<<11)-1);  // Mask to 11 bits
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (1 << 18) |                    // cc2 bit for 'icc'
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select immediate move
             ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc'
             (simm11 << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // MOVcc (register form) predicated on a float condition register fcc0-fcc3.
  enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (0 << 18) |                    // cc2 bit for 'fccX'
             ($cmp$$cmpcode << 14) |
             (0 << 13) |                    // select register move
             ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
             ($src$$reg << 0);
    cbuf.insts()->emit_int32(op);
  %}

  // MOVcc (simm11 immediate form) predicated on fcc0-fcc3.
  enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
    int simm11 = $src$$constant & ((1<<11)-1);  // Mask to 11 bits
    int op = (Assembler::arith_op << 30) |
             ($dst$$reg << 25) |
             (Assembler::movcc_op3 << 19) |
             (0 << 18) |                    // cc2 bit for 'fccX'
             ($cmp$$cmpcode << 14) |
             (1 << 13) |                    // select immediate move
             ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
             (simm11 << 0);
    cbuf.insts()->emit_int32(op);
  %}
2437 2438 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{ 2439 int op = (Assembler::arith_op << 30) | 2440 ($dst$$reg << 25) | 2441 (Assembler::fpop2_op3 << 19) | 2442 (0 << 18) | 2443 ($cmp$$cmpcode << 14) | 2444 (1 << 13) | // select register move 2445 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' or 'xcc' 2446 ($primary << 5) | // select single, double or quad 2447 ($src$$reg << 0); 2448 cbuf.insts()->emit_int32(op); 2449 %} 2450 2451 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{ 2452 int op = (Assembler::arith_op << 30) | 2453 ($dst$$reg << 25) | 2454 (Assembler::fpop2_op3 << 19) | 2455 (0 << 18) | 2456 ($cmp$$cmpcode << 14) | 2457 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX' 2458 ($primary << 5) | // select single, double or quad 2459 ($src$$reg << 0); 2460 cbuf.insts()->emit_int32(op); 2461 %} 2462 2463 // Used by the MIN/MAX encodings. Same as a CMOV, but 2464 // the condition comes from opcode-field instead of an argument. 
2465 enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{ 2466 int op = (Assembler::arith_op << 30) | 2467 ($dst$$reg << 25) | 2468 (Assembler::movcc_op3 << 19) | 2469 (1 << 18) | // cc2 bit for 'icc' 2470 ($primary << 14) | 2471 (0 << 13) | // select register move 2472 (0 << 11) | // cc1, cc0 bits for 'icc' 2473 ($src$$reg << 0); 2474 cbuf.insts()->emit_int32(op); 2475 %} 2476 2477 enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{ 2478 int op = (Assembler::arith_op << 30) | 2479 ($dst$$reg << 25) | 2480 (Assembler::movcc_op3 << 19) | 2481 (6 << 16) | // cc2 bit for 'xcc' 2482 ($primary << 14) | 2483 (0 << 13) | // select register move 2484 (0 << 11) | // cc1, cc0 bits for 'icc' 2485 ($src$$reg << 0); 2486 cbuf.insts()->emit_int32(op); 2487 %} 2488 2489 enc_class Set13( immI13 src, iRegI rd ) %{ 2490 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant ); 2491 %} 2492 2493 enc_class SetHi22( immI src, iRegI rd ) %{ 2494 emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant ); 2495 %} 2496 2497 enc_class Set32( immI src, iRegI rd ) %{ 2498 MacroAssembler _masm(&cbuf); 2499 __ set($src$$constant, reg_to_register_object($rd$$reg)); 2500 %} 2501 2502 enc_class call_epilog %{ 2503 if( VerifyStackAtCalls ) { 2504 MacroAssembler _masm(&cbuf); 2505 int framesize = ra_->C->frame_slots() << LogBytesPerInt; 2506 Register temp_reg = G3; 2507 __ add(SP, framesize, temp_reg); 2508 __ cmp(temp_reg, FP); 2509 __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc); 2510 } 2511 %} 2512 2513 // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value 2514 // to G1 so the register allocator will not have to deal with the misaligned register 2515 // pair. 
  // 32-bit VM only: G1 = (O0 << 32) | zero-extend(O1). The sllx simm13 value
  // 0x1020 encodes shift-count 32 with the 64-bit-shift bit set.
  enc_class adjust_long_from_native_call %{
#ifndef _LP64
    if (returns_long()) {
      // sllx  O0,32,O0
      emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
      // srl   O1,0,O1
      emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
      // or    O0,O1,G1
      emit3       ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
    }
#endif
  %}

  enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
    // CALL directly to the runtime
    // The user of this is responsible for ensuring that R_L7 is empty (killed).
    emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
                    /*preserve_g2=*/true);
  %}

  // Save SP in L7 around a method-handle call (paired with restore_SP).
  enc_class preserve_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(SP, L7_mh_SP_save);
  %}

  enc_class restore_SP %{
    MacroAssembler _masm(&cbuf);
    __ mov(L7_mh_SP_save, SP);
  %}

  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    if (!_method) {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
    } else if (_optimized_virtual) {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
    } else {
      emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
    }
    if (_method) {  // Emit stub for static call.
      CompiledStaticCall::emit_to_interp_stub(cbuf);
    }
  %}

  enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ set_inst_mark();
    int vtable_index = this->_vtable_index;
    // MachCallDynamicJavaNode::ret_addr_offset uses this same test
    if (vtable_index < 0) {
      // must be invalid_vtable_index, not nonvirtual_vtable_index
      assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
      Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
      assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
      assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
      __ ic_call((address)$meth$$method);
    } else {
      assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
      // Just go thru the vtable
      // get receiver klass (receiver already checked for non-null)
      // If we end up going thru a c2i adapter interpreter expects method in G5
      int off = __ offset();
      __ load_klass(O0, G3_scratch);
      int klass_load_size;
      if (UseCompressedClassPointers) {
        assert(Universe::heap() != NULL, "java heap should be initialized");
        klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
      } else {
        klass_load_size = 1*BytesPerInstWord;
      }
      int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
      int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
      if (Assembler::is_simm13(v_off)) {
        __ ld_ptr(G3, v_off, G5_method);
      } else {
        // Generate 2 instructions
        __ Assembler::sethi(v_off & ~0x3ff, G5_method);
        __ or3(G5_method, v_off & 0x3ff, G5_method);
        // ld_ptr, set_hi, set
        assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
               "Unexpected instruction size(s)");
        __ ld_ptr(G3, G5_method, G5_method);
      }
      // NOTE: for vtable dispatches, the vtable entry will never be null.
      // However it may very well end up in handle_wrong_method if the
      // method is abstract for the particular class.
      __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
      // jump to target (either compiled code or c2iadapter)
      __ jmpl(G3_scratch, G0, O7);
      __ delayed()->nop();
    }
  %}

  enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
    MacroAssembler _masm(&cbuf);

    Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    Register temp_reg = G3;   // caller must kill G3!  We cannot reuse G5_ic_reg here because
                              // we might be calling a C2I adapter which needs it.

    assert(temp_reg != G5_ic_reg, "conflicting registers");
    // Load nmethod
    __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);

    // CALL to compiled java, indirect the contents of G3
    __ set_inst_mark();
    __ callr(temp_reg, G0);
    __ delayed()->nop();
  %}

  // 32-bit signed divide via 64-bit sdivx; sra(...,0,...) sign-extends the
  // 32-bit operands first (and clobbers the source registers -- iRegIsafe).
  enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor = reg_to_register_object($src2$$reg);
    Register Rresult = reg_to_register_object($dst$$reg);

    __ sra(Rdivisor, 0, Rdivisor);
    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, Rdivisor, Rresult);
  %}

  // Divide by a simm13 constant; same sign-extension trick as idiv_reg.
  enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor = $imm$$constant;
    Register Rresult = reg_to_register_object($dst$$reg);

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rresult);
  %}

  // High 32 bits of a 32x32 signed multiply: sign-extend, mulx, shift down.
  enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    Register Rdst = reg_to_register_object($dst$$reg);

    __ sra( Rsrc1, 0, Rsrc1 );
    __ sra( Rsrc2, 0, Rsrc2 );
    __ mulx( Rsrc1, Rsrc2, Rdst );
    __ srlx( Rdst, 32, Rdst );
  %}

  // 32-bit signed remainder: r = dividend - (dividend/divisor)*divisor.
  enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);
    Register Rdividend = reg_to_register_object($src1$$reg);
    Register Rdivisor = reg_to_register_object($src2$$reg);
    Register Rresult = reg_to_register_object($dst$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");
    assert(Rdivisor  != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sra(Rdivisor, 0, Rdivisor);
    __ sdivx(Rdividend, Rdivisor, Rscratch);
    __ mulx(Rscratch, Rdivisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
  %}

  // Remainder by a simm13 constant; same quotient-multiply-subtract scheme.
  enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
    MacroAssembler _masm(&cbuf);

    Register Rdividend = reg_to_register_object($src1$$reg);
    int divisor = $imm$$constant;
    Register Rresult = reg_to_register_object($dst$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);

    assert(Rdividend != Rscratch, "");

    __ sra(Rdividend, 0, Rdividend);
    __ sdivx(Rdividend, divisor, Rscratch);
    __ mulx(Rscratch, divisor, Rscratch);
    __ sub(Rdividend, Rscratch, Rresult);
  %}

  // Single-precision absolute value.
  enc_class fabss (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  // Double-precision absolute value.
  enc_class fabsd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Double-precision negation.
  enc_class fnegd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Single-precision square root.
  enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  // Double-precision square root.
  enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Single-precision register move.
  enc_class fmovs (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
  %}

  // Double-precision register move.
  enc_class fmovd (dflt_reg dst, dflt_reg src) %{
    MacroAssembler _masm(&cbuf);

    FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
    FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

    __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
  %}

  // Inline fast-path object lock; slow path handled by the macro assembler.
  enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop  = reg_to_register_object($oop$$reg);
    Register Rbox  = reg_to_register_object($box$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark =    reg_to_register_object($scratch2$$reg);

    assert(Roop  != Rscratch, "");
    assert(Roop  != Rmark, "");
    assert(Rbox  != Rscratch, "");
    assert(Rbox  != Rmark, "");

    __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
  %}

  // Inline fast-path object unlock, mirror of Fast_Lock.
  enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
    MacroAssembler _masm(&cbuf);

    Register Roop  = reg_to_register_object($oop$$reg);
    Register Rbox  = reg_to_register_object($box$$reg);
    Register Rscratch = reg_to_register_object($scratch$$reg);
    Register Rmark =    reg_to_register_object($scratch2$$reg);

    assert(Roop  != Rscratch, "");
    assert(Roop  != Rmark, "");
    assert(Rbox  != Rscratch, "");
    assert(Rbox  != Rmark, "");

    __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
  %}

  // Pointer-sized compare-and-swap; trailing cmp sets flags for the user.
  enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
    MacroAssembler _masm(&cbuf);
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
    __ cmp( Rold, Rnew );
  %}

  // 64-bit CAS preserving Rnew: the swap goes through O7 so the new-value
  // register is not clobbered; cmp(Rold,O7) reports success/failure.
  enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ casx(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
  %}

  // raw int cas, used for compareAndSwap
  enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
    Register Rmem = reg_to_register_object($mem$$reg);
    Register Rold = reg_to_register_object($old$$reg);
    Register Rnew = reg_to_register_object($new$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(Rnew, O7);
    __ cas(Rmem, Rold, O7);
    __ cmp( Rold, O7 );
  %}

  // res = (xcc.notEqual) ? 1 : 0 -- start with 1, movcc clears on equal? No:
  // NOTE(review): mov(1) then movcc(notEqual, ..., G0, Rres) moves G0 (0)
  // into res when notEqual -- i.e. res = (flags != 0 condition) ? 0 : 1.
  // Verify the intended polarity against the instructs that use this.
  enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
  %}

  // Same as enc_lflags_ne_to_boolean but keyed on the 32-bit icc flags.
  enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
    Register Rres = reg_to_register_object($res$$reg);

    MacroAssembler _masm(&cbuf);
    __ mov(1, Rres);
    __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
  %}

  // Three-way FP compare into an integer register; $primary selects
  // single (non-zero) vs double (zero) operand registers.
  enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
                                   : reg_to_DoubleFloatRegister_object($src1$$reg);
    FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
                                   : reg_to_DoubleFloatRegister_object($src2$$reg);

    // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
    __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
  %}


  // String.compareTo intrinsic: compare up to min(len1,len2) chars, then
  // fall back to the length difference (stashed in O7) as tie-breaker.
  enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
    Label Ldone, Lloop;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = reg_to_register_object($str1$$reg);
    Register   str2_reg = reg_to_register_object($str2$$reg);
    Register   cnt1_reg = reg_to_register_object($cnt1$$reg);
    Register   cnt2_reg = reg_to_register_object($cnt2$$reg);
    Register result_reg = reg_to_register_object($result$$reg);

    assert(result_reg != str1_reg &&
           result_reg != str2_reg &&
           result_reg != cnt1_reg &&
           result_reg != cnt2_reg ,
           "need different registers");

    // Compute the minimum of the string lengths(str1_reg) and the
    // difference of the string lengths (stack)

    // See if the lengths are different, and calculate min in str1_reg.
    // Stash diff in O7 in case we need it for a tie-breaker.
    Label Lskip;
    __ subcc(cnt1_reg, cnt2_reg, O7);
    __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    // cnt2 is shorter, so use its count:
    __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
    __ bind(Lskip);

    // reallocate cnt1_reg, cnt2_reg, result_reg
    // Note:  limit_reg holds the string length pre-scaled by 2
    Register limit_reg =   cnt1_reg;
    Register  chr2_reg =   cnt2_reg;
    Register  chr1_reg = result_reg;
    // str{12} are the base pointers

    // Is the minimum length zero?
    __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result_reg);  // result is difference in lengths

    // Load first characters
    __ lduh(str1_reg, 0, chr1_reg);
    __ lduh(str2_reg, 0, chr2_reg);

    // Compare first characters
    __ subcc(chr1_reg, chr2_reg, chr1_reg);
    __ br(Assembler::notZero, false, Assembler::pt,  Ldone);
    assert(chr1_reg == result_reg, "result must be pre-placed");
    __ delayed()->nop();

    {
      // Check after comparing first character to see if strings are equivalent
      Label LSkip2;
      // Check if the strings start at same location
      __ cmp(str1_reg, str2_reg);
      __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
      __ delayed()->nop();

      // Check if the length difference is zero (in O7)
      __ cmp(G0, O7);
      __ br(Assembler::equal, true, Assembler::pn, Ldone);
      __ delayed()->mov(G0, result_reg);  // result is zero

      // Strings might not be equal
      __ bind(LSkip2);
    }

    __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result_reg);  // result is difference in lengths

    // Shift str1_reg and str2_reg to the end of the arrays, negate limit
    __ add(str1_reg, limit_reg, str1_reg);
    __ add(str2_reg, limit_reg, str2_reg);
    __ neg(chr1_reg, limit_reg);  // limit = -(limit-2)

    // Compare the rest of the characters
    __ lduh(str1_reg, limit_reg, chr1_reg);
    __ bind(Lloop);
    // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
    __ lduh(str2_reg, limit_reg, chr2_reg);
    __ subcc(chr1_reg, chr2_reg, chr1_reg);
    __ br(Assembler::notZero, false, Assembler::pt, Ldone);
    assert(chr1_reg == result_reg, "result must be pre-placed");
    __ delayed()->inccc(limit_reg, sizeof(jchar));
    // annul LDUH if branch is not taken to prevent access past end of string
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

    // If strings are equal up to min length, return the length difference.
    __ mov(O7, result_reg);

    // Otherwise, return the difference between the first mismatched chars.
    __ bind(Ldone);
  %}

  // String.equals intrinsic: identity/length fast paths, then 4-byte-aligned
  // word compare when possible, else a char-by-char loop.
  enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
    Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone;
    MacroAssembler _masm(&cbuf);

    Register   str1_reg = reg_to_register_object($str1$$reg);
    Register   str2_reg = reg_to_register_object($str2$$reg);
    Register    cnt_reg = reg_to_register_object($cnt$$reg);
    Register   tmp1_reg = O7;
    Register result_reg = reg_to_register_object($result$$reg);

    assert(result_reg != str1_reg &&
           result_reg != str2_reg &&
           result_reg !=  cnt_reg &&
           result_reg != tmp1_reg ,
           "need different registers");

    __ cmp(str1_reg, str2_reg); //same char[] ?
    __ brx(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->add(G0, 1, result_reg);

    __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
    __ delayed()->add(G0, 1, result_reg); // count == 0

    //rename registers
    Register limit_reg =    cnt_reg;
    Register  chr1_reg = result_reg;
    Register  chr2_reg =   tmp1_reg;

    //check for alignment and position the pointers to the ends
    __ or3(str1_reg, str2_reg, chr1_reg);
    __ andcc(chr1_reg, 0x3, chr1_reg);
    // notZero means at least one not 4-byte aligned.
    // We could optimize the case when both arrays are not aligned
    // but it is not frequent case and it requires additional checks.
    __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare
    __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count

    // Compare char[] arrays aligned to 4 bytes.
    __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
                          chr1_reg, chr2_reg, Ldone);
    __ ba(Ldone);
    __ delayed()->add(G0, 1, result_reg);

    // char by char compare
    __ bind(Lchar);
    __ add(str1_reg, limit_reg, str1_reg);
    __ add(str2_reg, limit_reg, str2_reg);
    __ neg(limit_reg); //negate count

    __ lduh(str1_reg, limit_reg, chr1_reg);
    // Lchar_loop
    __ bind(Lchar_loop);
    __ lduh(str2_reg, limit_reg, chr2_reg);
    __ cmp(chr1_reg, chr2_reg);
    __ br(Assembler::notEqual, true, Assembler::pt, Ldone);
    __ delayed()->mov(G0, result_reg); //not equal
    __ inccc(limit_reg, sizeof(jchar));
    // annul LDUH if branch is not taken to prevent access past end of string
    __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop);
    __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

    __ add(G0, 1, result_reg); //equal

    __ bind(Ldone);
  %}

  // Arrays.equals(char[],char[]) intrinsic: identity, null, and length
  // checks, then the shared aligned word-compare helper.
  enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{
    Label Lvector, Ldone, Lloop;
    MacroAssembler _masm(&cbuf);

    Register   ary1_reg = reg_to_register_object($ary1$$reg);
    Register   ary2_reg = reg_to_register_object($ary2$$reg);
    Register   tmp1_reg = reg_to_register_object($tmp1$$reg);
    Register   tmp2_reg = O7;
    Register result_reg = reg_to_register_object($result$$reg);

    int length_offset  = arrayOopDesc::length_offset_in_bytes();
    int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

    // return true if the same array
    __ cmp(ary1_reg, ary2_reg);
    __ brx(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->add(G0, 1, result_reg); // equal

    __ br_null(ary1_reg, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);    // not equal

    __ br_null(ary2_reg, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);    // not equal

    //load the lengths of arrays
    __ ld(Address(ary1_reg, length_offset), tmp1_reg);
    __ ld(Address(ary2_reg, length_offset), tmp2_reg);

    // return false if the two arrays are not equal length
    __ cmp(tmp1_reg, tmp2_reg);
    __ br(Assembler::notEqual, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);     // not equal

    __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
    __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal

    // load array addresses
    __ add(ary1_reg, base_offset, ary1_reg);
    __ add(ary2_reg, base_offset, ary2_reg);

    // renaming registers
    Register chr1_reg  =  result_reg; // for characters in ary1
    Register chr2_reg  =  tmp2_reg;   // for characters in ary2
    Register limit_reg =  tmp1_reg;   // length

    // set byte count
    __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg);

    // Compare char[] arrays aligned to 4 bytes.
    __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
                          chr1_reg, chr2_reg, Ldone);
    __ add(G0, 1, result_reg); // equals

    __ bind(Ldone);
  %}

  // Jump to the rethrow stub; in debug builds, record the jump PC in
  // last_rethrow first (inside a save_frame so no live registers change).
  enc_class enc_rethrow() %{
    cbuf.set_insts_mark();
    Register temp_reg = G3;
    AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
    assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
    MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    __ save_frame(0);
    AddressLiteral last_rethrow_addrlit(&last_rethrow);
    __ sethi(last_rethrow_addrlit, L1);
    Address addr(L1, last_rethrow_addrlit.low10());
    __ rdpc(L2);
    __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
    __ st_ptr(L2, addr);
    __ restore();
#endif
    __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
    __ delayed()->nop();
  %}

  enc_class emit_mem_nop() %{
    // Generates the instruction LDUXA [o6,g0],#0x82,g0
    cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
  %}

  enc_class emit_fadd_nop() %{
    // Generates the instruction FMOVS f31,f31
    cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
  %}

  enc_class emit_br_nop() %{
    // Generates the instruction BPN,PN .
    cbuf.insts()->emit_int32((unsigned int) 0x00400000);
  %}

  // Acquire barrier: orders prior loads before subsequent loads and stores.
  enc_class enc_membar_acquire %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
  %}

  // Release barrier: orders prior loads and stores before subsequent stores.
  enc_class enc_membar_release %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
  %}

  // Full fence: StoreLoad is the only ordering TSO does not already provide.
  enc_class enc_membar_volatile %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
  %}

%}

//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.
//
//  S T A C K   L A Y O U T    Allocators stack-slot number
//                             |   (to get allocators register number
//  G  Owned by    |        |  v    add VMRegImpl::stack0)
//  r   CALLER     |        |
//  o     |        +--------+      pad to even-align allocators stack-slot
//  w     V        |  pad0  |        numbers; owned by CALLER
//  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
//  h     ^        |   in   |  5
//        |        |  args  |  4   Holes in incoming args owned by SELF
//  |     |        |        |  3
//  |     |        +--------+
//  V     |        | old out|      Empty on Intel, window on Sparc
//        |    old |preserve|      Must be even aligned.
//        |     SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
//        |        |   in   |  3   area for Intel ret address
//     Owned by    |preserve|      Empty on Sparc.
//  SELF    +--------+
//    |     |  pad2  |  2   pad to align old SP
//    |     +--------+  1
//    |     | locks  |  0
//    |     +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
//    |     |  pad1  | 11   pad to align new SP
//    |     +--------+
//    |     |        | 10
//    |     | spills |  9   spills
//    V     |        |  8   (pad0 slot for callee)
//  -----------+--------+----> Matcher::_out_arg_limit, unaligned
//    ^        |  out   |  7
//    |        |  args  |  6   Holes in outgoing args owned by CALLEE
//  Owned by   +--------+
//   CALLEE    | new out|  6   Empty on Intel, window on Sparc
//    |   new  |preserve|      Must be even-aligned.
//    |    SP-+--------+----> Matcher::_new_SP, even aligned
//    |       |        |
//
// Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
//         known from SELF's arguments and the Java calling convention.
//         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
// Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
//         even aligned with pad0 as needed.
//         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
//         region 6-11 is even aligned; it may be padded out more so that
//         the region from SP to FP meets the minimum stack alignment.

frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(R_G5);                // Inline Cache Register or Method* for I2C
  interpreter_method_oop_reg(R_G5);      // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  // Compiled code's Frame Pointer
  frame_pointer(R_SP);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes);
  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
  // !LP64: Alignment size in bytes (64-bit  ->  8 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  in_preserve_stack_slots(0);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  // ADLC doesn't support parsing expressions, so I folded the math by hand.
#ifdef _LP64
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
  varargs_C_out_slots_killed(12);
#else
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
  varargs_C_out_slots_killed( 7);
#endif

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  return_addr(REG R_I7);          // Ret Addr is in register I7

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // java
  calling_convention %{
    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);

  %}

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // C.
  c_calling_convention %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of native (C/C++) and interpreter return values.  This is specified to
  // be the same as Java.  In the 32-bit VM, long values are actually returned from
  // native calls in O0:O1 and returned to the interpreter in I0:I1.  The copying
  // to and from the register pairs is done by the appropriate call and epilog
  // opcodes.  This simplifies the register allocator.
  // Tables are indexed by ideal register number (Op_RegI..Op_RegL); the
  // outgoing view uses O registers, the incoming view the matching I registers.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

  // Location of compiled Java return values.  Same as C
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
#endif
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

%}


//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);           // Required size attribute (in bits)
ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
ins_attrib ins_short_branch(0);    // Required flag: is
// this instruction a
// non-matching short branch variant of some
// long branch?

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

//----------Simple Operands----------------------------------------------------
// Immediate Operands
// Integer Immediate: 32-bit
operand immI() %{
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 8-bit
operand immI8() %{
  predicate(Assembler::is_simm8(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit
operand immI13() %{
  predicate(Assembler::is_simm13(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit minus 7
// i.e. the value plus 7 still fits in a signed 13-bit immediate.
operand immI13m7() %{
  predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit
operand immI16() %{
  predicate(Assembler::is_simm16(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned (positive) Integer Immediate: 13-bit
operand immU13() %{
  predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 6-bit (unsigned, values 0-63)
operand immU6() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 11-bit
operand immI11() %{
  predicate(Assembler::is_simm11(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 5-bit
operand immI5() %{
  predicate(Assembler::is_simm5(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 0-bit (the constant zero)
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 10
operand immI10() %{
  predicate(n->get_int() == 10);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 0-31
operand immU5() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 1-31
operand immI_1_31() %{
  predicate(n->get_int() >= 1 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 32-63
operand immI_32_63() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Integer Immediate: the value 16
operand immI_16() %{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 24
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 255
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 65535
operand immI_65535() %{
  predicate(n->get_int() == 65535);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FF
operand immL_FF() %{
  predicate( n->get_long() == 0xFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FFFF
operand immL_FFFF() %{
  predicate( n->get_long() == 0xFFFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 32 or 64-bit
operand immP() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

#ifdef _LP64
// Pointer Immediate: 64-bit
// Pre-Niagara-plus: materialize with a "set" instruction sequence.
operand immP_set() %{
  predicate(!VM_Version::is_niagara_plus());
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// From Niagara2 processors on a load should be better than materializing.
operand immP_load() %{
  predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Non-oop constant cheap enough (<= 3 insns) to materialize directly.
operand immP_no_oop_cheap() %{
  predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
#endif

// Pointer Immediate: 13-bit (fits a simm13 field)
operand immP13() %{
  predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: the safepoint polling page address
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);

  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate
operand immN()
%{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immNKlass()
%{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: any 64-bit constant
operand immL() %{
  match(ConL);
  op_cost(40);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: zero
operand immL0() %{
  predicate(n->get_long() == 0L);
  match(ConL);
  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 5-bit
operand immL5() %{
  predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 13-bit
operand immL13() %{
  predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 13-bit minus 7
// i.e. the value plus 7 still fits in a signed 13-bit immediate.
operand immL13m7() %{
  predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate
operand immD() %{
  match(ConD);

  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
operand immD0() %{
#ifdef _LP64
  // on 64-bit architectures this comparison is faster
  predicate(jlong_cast(n->getd()) == 0);
#else
  predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
#endif
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: 0
operand immF0() %{
  predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Register Operands
// Integer Register
operand iRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  match(notemp_iRegI);
  match(g1RegI);
  match(o0RegI);
  match(iRegIsafe);

  format %{ %}
  interface(REG_INTER);
%}

operand notemp_iRegI() %{
  constraint(ALLOC_IN_RC(notemp_int_reg));
  match(RegI);

  match(o0RegI);

  format %{ %}
  interface(REG_INTER);
%}

operand o0RegI() %{
  constraint(ALLOC_IN_RC(o0_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register
operand iRegP() %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);

  match(lock_ptr_RegP);
  match(g1RegP);
  match(g2RegP);
  match(g3RegP);
  match(g4RegP);
  match(i0RegP);
  match(o0RegP);
  match(o1RegP);
  match(l7RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand sp_ptr_RegP() %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(RegP);
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand lock_ptr_RegP() %{
  constraint(ALLOC_IN_RC(lock_ptr_reg));
  match(RegP);
  match(i0RegP);
  match(o0RegP);
  match(o1RegP);
  match(l7RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g1RegP() %{
  constraint(ALLOC_IN_RC(g1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g2RegP() %{
  constraint(ALLOC_IN_RC(g2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegP() %{
  constraint(ALLOC_IN_RC(g3_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g1RegI() %{
  constraint(ALLOC_IN_RC(g1_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegI() %{
  constraint(ALLOC_IN_RC(g3_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g4RegI() %{
  constraint(ALLOC_IN_RC(g4_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g4RegP() %{
  constraint(ALLOC_IN_RC(g4_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand i0RegP() %{
  constraint(ALLOC_IN_RC(i0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o0RegP() %{
  constraint(ALLOC_IN_RC(o0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o1RegP() %{
  constraint(ALLOC_IN_RC(o1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o2RegP() %{
  constraint(ALLOC_IN_RC(o2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegP() %{
  constraint(ALLOC_IN_RC(o7_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand l7RegP() %{
  constraint(ALLOC_IN_RC(l7_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegI()
%{
  constraint(ALLOC_IN_RC(o7_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Narrow (compressed) oop register
operand iRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Long Register
operand iRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand o2RegL() %{
  constraint(ALLOC_IN_RC(o2_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegL() %{
  constraint(ALLOC_IN_RC(o7_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand g1RegL() %{
  constraint(ALLOC_IN_RC(g1_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegL() %{
  constraint(ALLOC_IN_RC(g3_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Int Register safe
// This is 64bit safe
operand iRegIsafe() %{
  constraint(ALLOC_IN_RC(long_reg));

  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Condition Code Flag Register
operand flagsReg() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "ccr" %} // both ICC and XCC
  interface(REG_INTER);
%}

// Condition Code Register, unsigned comparisons.
operand flagsRegU() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "icc_U" %}
  interface(REG_INTER);
%}

// Condition Code Register, pointer comparisons.
operand flagsRegP() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

#ifdef _LP64
  format %{ "xcc_P" %}
#else
  format %{ "icc_P" %}
#endif
  interface(REG_INTER);
%}

// Condition Code Register, long comparisons.
operand flagsRegL() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "xcc_L" %}
  interface(REG_INTER);
%}

// Condition Code Register, floating comparisons, unordered same as "less".
operand flagsRegF() %{
  constraint(ALLOC_IN_RC(float_flags));
  match(RegFlags);
  match(flagsRegF0);

  format %{ %}
  interface(REG_INTER);
%}

// Floating condition flags restricted to the first flag register (float_flag0).
operand flagsRegF0() %{
  constraint(ALLOC_IN_RC(float_flag0));
  match(RegFlags);

  format %{ %}
  interface(REG_INTER);
%}


// Condition Code Flag Register used by long compare
operand flagsReg_long_LTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_LTGE" %}
  interface(REG_INTER);
%}
operand flagsReg_long_EQNE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_EQNE" %}
  interface(REG_INTER);
%}
operand flagsReg_long_LEGT() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_LEGT" %}
  interface(REG_INTER);
%}


// Double-precision floating register.
operand regD() %{
  constraint(ALLOC_IN_RC(dflt_reg));
  match(RegD);

  match(regD_low);

  format %{ %}
  interface(REG_INTER);
%}

// Single-precision floating register.
operand regF() %{
  constraint(ALLOC_IN_RC(sflt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register restricted to the low bank (register class dflt_low_reg).
operand regD_low() %{
  constraint(ALLOC_IN_RC(dflt_low_reg));
  match(regD);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}


//----------Complex Operands---------------------------------------------------
// Indirect Memory Reference
operand indirect(sp_ptr_RegP reg) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(reg);

  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect with simm13 Offset
operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with simm13 Offset minus 7
operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Note: Intel has a swapped version also, like this:
//operand indOffsetX(iRegI reg, immP offset) %{
//  constraint(ALLOC_IN_RC(int_reg));
//  match(AddP offset reg);
//
//  op_cost(100);
//  format %{ "[$reg + $offset]" %}
//  interface(MEMORY_INTER) %{
//    base($reg);
//    index(0x0);
//    scale(0x0);
//    disp($offset);
//  %}
//%}
//// However, it doesn't make sense for SPARC, since
//   we have no particularly good way to embed oops in
//   single instructions.
// Indirect with Register Index
operand indIndex(iRegP addr, iRegX index) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr index);

  op_cost(100);
  format %{ "[$addr + $index]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Op, signed integer compares.
// The hex values are the actual condition-field bit patterns placed into
// the branch opcodes (see "encoding number" note at the top of this file).
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x3);
    greater_equal(0xB);
    less_equal(0x2);
    greater(0xA);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, unsigned
operand cmpOpU() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, branch-register encoding
operand cmpOp_reg() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x1);
    not_equal    (0x5);
    less         (0x3);
    greater_equal(0x7);
    less_equal   (0x2);
    greater      (0x6);
    overflow(0x7);      // not supported
    no_overflow(0xF);   // not supported
  %}
%}

// Comparison Code, floating, unordered same as less
operand cmpOpF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "fl" %}
  interface(COND_INTER) %{
    equal(0x9);
    not_equal(0x1);
    less(0x3);
    greater_equal(0xB);
    less_equal(0xE);
    greater(0x6);

    overflow(0x7);      // not supported
    no_overflow(0xF);   // not supported
  %}
%}

// Used by long compare
// Encodings are the signed ones with less/greater (and their -or-equal
// forms) swapped, for use when the compare operands have been commuted.
operand cmpOp_commute() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0xA);
    greater_equal(0x2);
    less_equal(0xB);
    greater(0x3);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format.  The classic
// case of this is memory operands.
// Note: indOffset13m7 is deliberately not part of the generic memory class.
opclass memory( indirect, indOffset13, indIndex );
opclass indIndexMemory( indIndex );

//----------PIPELINE-----------------------------------------------------------
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;            // Fixed size instructions
  branch_has_delay_slot;              // Branch has delay slot following
  max_instructions_per_bundle = 4;    // Up to 4 instructions per bundle
  instruction_unit_size = 4;          // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;   // The processor fetches one line
  instruction_fetch_units = 1;        // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg long operation
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
    instruction_count(2);
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
    IALU  : R;
%}

// Integer ALU reg-reg long dependent operation
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    cr    : E(write);
    IALU  : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code
pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
    single_instruction;
    cr    : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code only

pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code only
pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg-zero operation with condition code only
pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm-zero operation with condition code only
pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : E(write);   // src1 is both read and rewritten
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
    single_instruction;
    cr    : E(write);
    src1  : E(write);   // src1 is both read and rewritten
    src1  : R(read);
    IALU  : R;
%}

// Long compare: result available 4 cycles late, uses both IALU and BR.
pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
    multiple_bundles;
    dst   : E(write)+4;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R(3);
    BR    : R(2);
%}

// Integer ALU operation
pipe_class ialu_none(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU reg operation
pipe_class ialu_reg(iRegI dst, iRegI src) %{
    single_instruction;
    may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
    single_instruction;
    op2_out : C(write);
    op1     : R(read);
    cr      : R(read);  // This is really E, with a 1 cycle stall
    BR      : R(2);
    MS      : R(2);
%}

#ifdef _LP64
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
    instruction_count(1); multiple_bundles;
    dst     : C(write)+1;
    src     : R(read)+1;
    IALU    : R(1);
    BR      : E(2);
    MS      : E(2);
%}
#endif

// Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

// Two integer ALU reg operations
pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
    instruction_count(2);
    dst   : E(write);
    src   : R(read);
    A0    : R;
    A1    : R;
%}

// Two integer ALU reg operations
pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
    instruction_count(2); may_have_no_code;
    dst   : E(write);
    src   : R(read);
    A0    : R;
    A1    : R;
%}

// Integer ALU imm operation
pipe_class ialu_imm(iRegI dst, immI13 src) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU reg-reg with carry operation
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU cc operation
pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
    single_instruction;
    dst   : E(write);
    cc    : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    src   : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    p     : R(read);
    q     : R(read);
    IALU  : R;
%}

// Integer ALU hi-lo-reg operation
pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
%}

// Float ALU hi-lo-reg operation (with temp)
pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
%}

// Long Constant
pipe_class loadConL( iRegL dst, immL src ) %{
    instruction_count(2); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
    IALU  : R(2);
%}

// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
#else
    dst   : E(write);
    IALU  : R;
#endif
%}

// Long Constant small
pipe_class loadConLlo( iRegL dst, immL src ) %{
    instruction_count(2);
    dst   : E(write);
    IALU  : R;
    IALU  : R;
%}

// [PHH] This is wrong for 64-bit.  See LdImmF/D.
pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
    instruction_count(1); multiple_bundles;
    src   : R(read);
    dst   : M(write)+1;
    IALU  : R;
    MS    : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
    single_instruction;
    IALU  : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A0() %{
    single_instruction;
    A0    : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A1() %{
    single_instruction;
    A1    : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    MS    : R(5);
%}

// Integer Multiply reg-imm operation
pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    MS    : R(5);
%}

pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    single_instruction;
    dst   : E(write)+4;
    src1  : R(read);
    src2  : R(read);
    MS    : R(6);
%}

pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    single_instruction;
    dst   : E(write)+4;
    src1  : R(read);
    MS    : R(6);
%}

// Integer Divide reg-reg
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    temp  : E(write);
    src1  : R(read);
    src2  : R(read);
    temp  : R(read);
    MS    : R(38);
%}

// Integer Divide reg-imm
pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    temp  : E(write);
    src1  : R(read);
    temp  : R(read);
    MS    : R(38);
%}

// Long Divide
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    src2 : R(read)+1;
    MS   : R(70);
%}

pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    MS   : R(70);
%}

// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Divide Float
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
    FDIV  : C(14);
%}

// Floating Point Divide Double
pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
    FDIV  : C(17);
%}

// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
    single_instruction;
    dst   : W(write);
    src   : E(read);
    FA    : R(1);
%}

// Floating Point Move/Negate/Abs Double
pipe_class faddD_reg(regD dst, regD src) %{
    single_instruction;
    dst   : W(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->D
pipe_class fcvtF2D(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->D
pipe_class fcvtI2D(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert LHi->D
pipe_class fcvtLHi2D(regD dst, regD src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert L->D
pipe_class fcvtL2D(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert L->F
// NOTE(review): dst is declared regD here despite the L->F name -- confirm
// against the instructions that use this class.
pipe_class fcvtL2F(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->F
pipe_class fcvtD2F(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->L
pipe_class fcvtI2L(regD dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->I
pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert D->L
pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->I
pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert F->L
pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : X(write)+6;
    src   : E(read);
    FA    : R;
%}

// Floating Point Convert I->F
pipe_class fcvtI2F(regF dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    FA    : R;
%}

// Floating Point Compare
pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
    single_instruction;
    cr    : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Compare
pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
    single_instruction;
    cr    : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Add Nop
pipe_class fadd_nop() %{
    single_instruction;
    FA    : R;
%}

// Integer Store to Memory
pipe_class istore_mem_reg(memory mem, iRegI src) %{
    single_instruction;
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Integer Store to Memory
pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
    single_instruction;
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Integer Store Zero to Memory
pipe_class istore_mem_zero(memory mem, immI0 src) %{
    single_instruction;
    mem   : R(read);
    MS    : R;
%}

// Special Stack Slot Store
pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Special Stack Slot Store
pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
    instruction_count(2); multiple_bundles;
    stkSlot : R(read);
    src     : C(read);
    MS      : R(2);
%}

// Float Store
pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
    single_instruction;
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Float Store
pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
    single_instruction;
    mem   : R(read);
    MS    : R;
%}

// Double Store
pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
    instruction_count(1);
    mem   : R(read);
    src   : C(read);
    MS    : R;
%}

// Double Store
pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
    single_instruction;
    mem   : R(read);
    MS    : R;
%}

// Special Stack Slot Float Store
pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Special Stack Slot Double Store
pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Integer Load (when sign bit propagation not needed)
pipe_class iload_mem(iRegI dst, memory mem) %{
    single_instruction;
    mem : R(read);
    dst : C(write);
    MS  : R;
%}

// Integer Load from stack operand
pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
    single_instruction;
    mem : R(read);
    dst : C(write);
    MS  : R;
%}

// Integer Load (when sign bit propagation or masking is needed)
pipe_class iload_mask_mem(iRegI dst, memory mem) %{
    single_instruction;
    mem : R(read);
    dst :
          M(write);
    MS  : R;
%}

// Float Load
pipe_class floadF_mem(regF dst, memory mem) %{
    single_instruction;
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadD_mem(regD dst, memory mem) %{
    instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
    single_instruction;
    stkSlot : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
    single_instruction;
    stkSlot : R(read);
    dst : M(write);
    MS  : R;
%}

// Memory Nop
pipe_class mem_nop() %{
    single_instruction;
    MS  : R;
%}

pipe_class sethi(iRegP dst, immI src) %{
    single_instruction;
    dst  : E(write);
    IALU : R;
%}

pipe_class loadPollP(iRegP poll) %{
    single_instruction;
    poll : R(read);
    MS   : R;
%}

pipe_class br(Universe br, label labl) %{
    single_instruction_with_delay_slot;
    BR  : R;
%}

pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr  : E(read);
    BR  : R;
%}

pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
    single_instruction_with_delay_slot;
    op1 : E(read);
    BR  : R;
    MS  : R;
%}

// Compare and branch
pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
    instruction_count(2); has_delay_slot;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
    BR    : R;
%}

// Compare and branch
pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
    instruction_count(2); has_delay_slot;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
    BR    : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
    single_instruction;
    src1  : E(read);
    src2  : E(read);
    IALU  : R;
    BR    : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
    single_instruction;
    src1  : E(read);
    IALU  : R;
    BR    : R;
%}

pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr  : E(read);
    BR  : R;
%}

pipe_class br_nop() %{
    single_instruction;
    BR  : R;
%}

pipe_class simple_call(method meth) %{
    instruction_count(2); multiple_bundles; force_serialization;
    fixed_latency(100);
    BR  : R(1);
    MS  : R(1);
    A0  : R(1);
%}

pipe_class compiled_call(method meth) %{
    instruction_count(1); multiple_bundles; force_serialization;
    fixed_latency(100);
    MS  : R(1);
%}

pipe_class call(method meth) %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(100);
%}

pipe_class tail_call(Universe ignore, label labl) %{
    single_instruction; has_delay_slot;
    fixed_latency(100);
    BR  : R(1);
    MS  : R(1);
%}

pipe_class ret(Universe ignore) %{
    single_instruction; has_delay_slot;
    BR  : R(1);
    MS  : R(1);
%}

pipe_class ret_poll(g3RegP poll) %{
    instruction_count(3); has_delay_slot;
    poll : E(read);
    MS   : R;
%}

// The real do-nothing guy
pipe_class empty( ) %{
    instruction_count(0);
%}

pipe_class long_memory_op() %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(25);
    MS  : R(1);
%}

// Check-cast
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
    array : R(read);
    match : R(read);
    IALU  : R(2);
    BR    : R(2);
    MS    : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
    src1  : E(read);
    src2  : E(read);
    dst   : E(write);
    FA    : R;
    MS    : R(2);
    BR    : R(2);
%}

// Compare for p < q, and conditionally add y
// NOTE(review): the last resource use has no trailing ';' unlike every
// other pipe_class in this file; ADLC appears to accept both forms.
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
    p     : E(read);
    q     : E(read);
    y     : E(read);
    IALU  : R(3)
%}

// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
    src2   : E(read);
    srcdst : E(read);
    IALU   : R;
    BR     : R;
%}

// Define the class for the Nop node
define %{
   MachNop = ialu_nop;
%}

%}

//----------INSTRUCTIONS-------------------------------------------------------

//------------Special Stack Slot instructions - no match rules-----------------
instruct stkI_to_regF(regF dst, stackSlotI src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDF    $src,$dst\t! stkI to regF" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadF_stk);
%}

instruct stkL_to_regD(regD dst, stackSlotL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF   $src,$dst\t! stkL to regD" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadD_stk);
%}

instruct regF_to_stkI(stackSlotI dst, regF src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STF    $src,$dst\t! regF to stkI" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

instruct regD_to_stkL(stackSlotL dst, regD src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF   $src,$dst\t! regD to stkL" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreD_stk_reg);
%}

// Store the int in the hi word of the slot and zero the lo word (two STWs).
instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST*2);
  size(8);
  format %{ "STW    $src,$dst.hi\t! long\n\t"
            "STW    R_G0,$dst.lo" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
  ins_pipe(lstoreI_stk_reg);
%}

instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t! regL to stkD" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_stk_reg);
%}

//---------- Chain stack slots between similar types --------

// Load integer from stack slot
instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW   $src,$dst\t!stk" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store integer to stack slot
instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW    $src,$dst\t!stk" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load long from stack slot
instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX    $src,$dst\t! long" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX    $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
// NOTE(review): unlike the 64-bit versions above, the 32-bit forms do not
// declare size(4) -- confirm whether that is intentional.
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDUW   $src,$dst\t!ptr" %}
  opcode(Assembler::lduw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STW    $src,$dst\t!ptr" %}
  opcode(Assembler::stw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#endif // _LP64

//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP    ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_cost(0);

  format %{ "NOP    ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_cost(0);

  format %{ "NOP    ! Memory Pipeline" %}
  ins_encode( emit_mem_nop );
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_cost(0);

  format %{ "NOP    ! Floating Add Pipeline" %}
  ins_encode( emit_fadd_nop );
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_cost(0);

  format %{ "NOP    ! Branch Pipeline" %}
  ins_encode( emit_br_nop );
  ins_pipe(br_nop);
%}

//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem,$dst\t! byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
instruct loadB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem,$dst\t!
byte -> long" %} 5555 ins_encode %{ 5556 __ ldsb($mem$$Address, $dst$$Register); 5557 %} 5558 ins_pipe(iload_mask_mem); 5559 %} 5560 5561 // Load Unsigned Byte (8bit UNsigned) into an int reg 5562 instruct loadUB(iRegI dst, memory mem) %{ 5563 match(Set dst (LoadUB mem)); 5564 ins_cost(MEMORY_REF_COST); 5565 5566 size(4); 5567 format %{ "LDUB $mem,$dst\t! ubyte" %} 5568 ins_encode %{ 5569 __ ldub($mem$$Address, $dst$$Register); 5570 %} 5571 ins_pipe(iload_mem); 5572 %} 5573 5574 // Load Unsigned Byte (8bit UNsigned) into a Long Register 5575 instruct loadUB2L(iRegL dst, memory mem) %{ 5576 match(Set dst (ConvI2L (LoadUB mem))); 5577 ins_cost(MEMORY_REF_COST); 5578 5579 size(4); 5580 format %{ "LDUB $mem,$dst\t! ubyte -> long" %} 5581 ins_encode %{ 5582 __ ldub($mem$$Address, $dst$$Register); 5583 %} 5584 ins_pipe(iload_mem); 5585 %} 5586 5587 // Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register 5588 instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{ 5589 match(Set dst (ConvI2L (AndI (LoadUB mem) mask))); 5590 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5591 5592 size(2*4); 5593 format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t" 5594 "AND $dst,$mask,$dst" %} 5595 ins_encode %{ 5596 __ ldub($mem$$Address, $dst$$Register); 5597 __ and3($dst$$Register, $mask$$constant, $dst$$Register); 5598 %} 5599 ins_pipe(iload_mem); 5600 %} 5601 5602 // Load Short (16bit signed) 5603 instruct loadS(iRegI dst, memory mem) %{ 5604 match(Set dst (LoadS mem)); 5605 ins_cost(MEMORY_REF_COST); 5606 5607 size(4); 5608 format %{ "LDSH $mem,$dst\t! 
short" %} 5609 ins_encode %{ 5610 __ ldsh($mem$$Address, $dst$$Register); 5611 %} 5612 ins_pipe(iload_mask_mem); 5613 %} 5614 5615 // Load Short (16 bit signed) to Byte (8 bit signed) 5616 instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5617 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour)); 5618 ins_cost(MEMORY_REF_COST); 5619 5620 size(4); 5621 5622 format %{ "LDSB $mem+1,$dst\t! short -> byte" %} 5623 ins_encode %{ 5624 __ ldsb($mem$$Address, $dst$$Register, 1); 5625 %} 5626 ins_pipe(iload_mask_mem); 5627 %} 5628 5629 // Load Short (16bit signed) into a Long Register 5630 instruct loadS2L(iRegL dst, memory mem) %{ 5631 match(Set dst (ConvI2L (LoadS mem))); 5632 ins_cost(MEMORY_REF_COST); 5633 5634 size(4); 5635 format %{ "LDSH $mem,$dst\t! short -> long" %} 5636 ins_encode %{ 5637 __ ldsh($mem$$Address, $dst$$Register); 5638 %} 5639 ins_pipe(iload_mask_mem); 5640 %} 5641 5642 // Load Unsigned Short/Char (16bit UNsigned) 5643 instruct loadUS(iRegI dst, memory mem) %{ 5644 match(Set dst (LoadUS mem)); 5645 ins_cost(MEMORY_REF_COST); 5646 5647 size(4); 5648 format %{ "LDUH $mem,$dst\t! ushort/char" %} 5649 ins_encode %{ 5650 __ lduh($mem$$Address, $dst$$Register); 5651 %} 5652 ins_pipe(iload_mem); 5653 %} 5654 5655 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed) 5656 instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5657 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour)); 5658 ins_cost(MEMORY_REF_COST); 5659 5660 size(4); 5661 format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %} 5662 ins_encode %{ 5663 __ ldsb($mem$$Address, $dst$$Register, 1); 5664 %} 5665 ins_pipe(iload_mask_mem); 5666 %} 5667 5668 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register 5669 instruct loadUS2L(iRegL dst, memory mem) %{ 5670 match(Set dst (ConvI2L (LoadUS mem))); 5671 ins_cost(MEMORY_REF_COST); 5672 5673 size(4); 5674 format %{ "LDUH $mem,$dst\t! 
ushort/char -> long" %} 5675 ins_encode %{ 5676 __ lduh($mem$$Address, $dst$$Register); 5677 %} 5678 ins_pipe(iload_mem); 5679 %} 5680 5681 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register 5682 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5683 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5684 ins_cost(MEMORY_REF_COST); 5685 5686 size(4); 5687 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %} 5688 ins_encode %{ 5689 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE 5690 %} 5691 ins_pipe(iload_mem); 5692 %} 5693 5694 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register 5695 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{ 5696 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5697 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5698 5699 size(2*4); 5700 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t" 5701 "AND $dst,$mask,$dst" %} 5702 ins_encode %{ 5703 Register Rdst = $dst$$Register; 5704 __ lduh($mem$$Address, Rdst); 5705 __ and3(Rdst, $mask$$constant, Rdst); 5706 %} 5707 ins_pipe(iload_mem); 5708 %} 5709 5710 // Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register 5711 instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{ 5712 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5713 effect(TEMP dst, TEMP tmp); 5714 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5715 5716 size((3+1)*4); // set may use two instructions. 5717 format %{ "LDUH $mem,$dst\t! 
ushort/char & 16-bit mask -> long\n\t" 5718 "SET $mask,$tmp\n\t" 5719 "AND $dst,$tmp,$dst" %} 5720 ins_encode %{ 5721 Register Rdst = $dst$$Register; 5722 Register Rtmp = $tmp$$Register; 5723 __ lduh($mem$$Address, Rdst); 5724 __ set($mask$$constant, Rtmp); 5725 __ and3(Rdst, Rtmp, Rdst); 5726 %} 5727 ins_pipe(iload_mem); 5728 %} 5729 5730 // Load Integer 5731 instruct loadI(iRegI dst, memory mem) %{ 5732 match(Set dst (LoadI mem)); 5733 ins_cost(MEMORY_REF_COST); 5734 5735 size(4); 5736 format %{ "LDUW $mem,$dst\t! int" %} 5737 ins_encode %{ 5738 __ lduw($mem$$Address, $dst$$Register); 5739 %} 5740 ins_pipe(iload_mem); 5741 %} 5742 5743 // Load Integer to Byte (8 bit signed) 5744 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5745 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour)); 5746 ins_cost(MEMORY_REF_COST); 5747 5748 size(4); 5749 5750 format %{ "LDSB $mem+3,$dst\t! int -> byte" %} 5751 ins_encode %{ 5752 __ ldsb($mem$$Address, $dst$$Register, 3); 5753 %} 5754 ins_pipe(iload_mask_mem); 5755 %} 5756 5757 // Load Integer to Unsigned Byte (8 bit UNsigned) 5758 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{ 5759 match(Set dst (AndI (LoadI mem) mask)); 5760 ins_cost(MEMORY_REF_COST); 5761 5762 size(4); 5763 5764 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %} 5765 ins_encode %{ 5766 __ ldub($mem$$Address, $dst$$Register, 3); 5767 %} 5768 ins_pipe(iload_mask_mem); 5769 %} 5770 5771 // Load Integer to Short (16 bit signed) 5772 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{ 5773 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen)); 5774 ins_cost(MEMORY_REF_COST); 5775 5776 size(4); 5777 5778 format %{ "LDSH $mem+2,$dst\t! 
int -> short" %} 5779 ins_encode %{ 5780 __ ldsh($mem$$Address, $dst$$Register, 2); 5781 %} 5782 ins_pipe(iload_mask_mem); 5783 %} 5784 5785 // Load Integer to Unsigned Short (16 bit UNsigned) 5786 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{ 5787 match(Set dst (AndI (LoadI mem) mask)); 5788 ins_cost(MEMORY_REF_COST); 5789 5790 size(4); 5791 5792 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %} 5793 ins_encode %{ 5794 __ lduh($mem$$Address, $dst$$Register, 2); 5795 %} 5796 ins_pipe(iload_mask_mem); 5797 %} 5798 5799 // Load Integer into a Long Register 5800 instruct loadI2L(iRegL dst, memory mem) %{ 5801 match(Set dst (ConvI2L (LoadI mem))); 5802 ins_cost(MEMORY_REF_COST); 5803 5804 size(4); 5805 format %{ "LDSW $mem,$dst\t! int -> long" %} 5806 ins_encode %{ 5807 __ ldsw($mem$$Address, $dst$$Register); 5808 %} 5809 ins_pipe(iload_mask_mem); 5810 %} 5811 5812 // Load Integer with mask 0xFF into a Long Register 5813 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5814 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5815 ins_cost(MEMORY_REF_COST); 5816 5817 size(4); 5818 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %} 5819 ins_encode %{ 5820 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE 5821 %} 5822 ins_pipe(iload_mem); 5823 %} 5824 5825 // Load Integer with mask 0xFFFF into a Long Register 5826 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{ 5827 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5828 ins_cost(MEMORY_REF_COST); 5829 5830 size(4); 5831 format %{ "LDUH $mem+2,$dst\t! 
int & 0xFFFF -> long" %} 5832 ins_encode %{ 5833 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE 5834 %} 5835 ins_pipe(iload_mem); 5836 %} 5837 5838 // Load Integer with a 13-bit mask into a Long Register 5839 instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{ 5840 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5841 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5842 5843 size(2*4); 5844 format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t" 5845 "AND $dst,$mask,$dst" %} 5846 ins_encode %{ 5847 Register Rdst = $dst$$Register; 5848 __ lduw($mem$$Address, Rdst); 5849 __ and3(Rdst, $mask$$constant, Rdst); 5850 %} 5851 ins_pipe(iload_mem); 5852 %} 5853 5854 // Load Integer with a 32-bit mask into a Long Register 5855 instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{ 5856 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5857 effect(TEMP dst, TEMP tmp); 5858 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5859 5860 size((3+1)*4); // set may use two instructions. 5861 format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t" 5862 "SET $mask,$tmp\n\t" 5863 "AND $dst,$tmp,$dst" %} 5864 ins_encode %{ 5865 Register Rdst = $dst$$Register; 5866 Register Rtmp = $tmp$$Register; 5867 __ lduw($mem$$Address, Rdst); 5868 __ set($mask$$constant, Rtmp); 5869 __ and3(Rdst, Rtmp, Rdst); 5870 %} 5871 ins_pipe(iload_mem); 5872 %} 5873 5874 // Load Unsigned Integer into a Long Register 5875 instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{ 5876 match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); 5877 ins_cost(MEMORY_REF_COST); 5878 5879 size(4); 5880 format %{ "LDUW $mem,$dst\t! uint -> long" %} 5881 ins_encode %{ 5882 __ lduw($mem$$Address, $dst$$Register); 5883 %} 5884 ins_pipe(iload_mem); 5885 %} 5886 5887 // Load Long - aligned 5888 instruct loadL(iRegL dst, memory mem ) %{ 5889 match(Set dst (LoadL mem)); 5890 ins_cost(MEMORY_REF_COST); 5891 5892 size(4); 5893 format %{ "LDX $mem,$dst\t! 
long" %} 5894 ins_encode %{ 5895 __ ldx($mem$$Address, $dst$$Register); 5896 %} 5897 ins_pipe(iload_mem); 5898 %} 5899 5900 // Load Long - UNaligned 5901 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{ 5902 match(Set dst (LoadL_unaligned mem)); 5903 effect(KILL tmp); 5904 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 5905 size(16); 5906 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n" 5907 "\tLDUW $mem ,$dst\n" 5908 "\tSLLX #32, $dst, $dst\n" 5909 "\tOR $dst, R_O7, $dst" %} 5910 opcode(Assembler::lduw_op3); 5911 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst )); 5912 ins_pipe(iload_mem); 5913 %} 5914 5915 // Load Range 5916 instruct loadRange(iRegI dst, memory mem) %{ 5917 match(Set dst (LoadRange mem)); 5918 ins_cost(MEMORY_REF_COST); 5919 5920 size(4); 5921 format %{ "LDUW $mem,$dst\t! range" %} 5922 opcode(Assembler::lduw_op3); 5923 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5924 ins_pipe(iload_mem); 5925 %} 5926 5927 // Load Integer into %f register (for fitos/fitod) 5928 instruct loadI_freg(regF dst, memory mem) %{ 5929 match(Set dst (LoadI mem)); 5930 ins_cost(MEMORY_REF_COST); 5931 size(4); 5932 5933 format %{ "LDF $mem,$dst\t! for fitos/fitod" %} 5934 opcode(Assembler::ldf_op3); 5935 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5936 ins_pipe(floadF_mem); 5937 %} 5938 5939 // Load Pointer 5940 instruct loadP(iRegP dst, memory mem) %{ 5941 match(Set dst (LoadP mem)); 5942 ins_cost(MEMORY_REF_COST); 5943 size(4); 5944 5945 #ifndef _LP64 5946 format %{ "LDUW $mem,$dst\t! ptr" %} 5947 ins_encode %{ 5948 __ lduw($mem$$Address, $dst$$Register); 5949 %} 5950 #else 5951 format %{ "LDX $mem,$dst\t! ptr" %} 5952 ins_encode %{ 5953 __ ldx($mem$$Address, $dst$$Register); 5954 %} 5955 #endif 5956 ins_pipe(iload_mem); 5957 %} 5958 5959 // Load Compressed Pointer 5960 instruct loadN(iRegN dst, memory mem) %{ 5961 match(Set dst (LoadN mem)); 5962 ins_cost(MEMORY_REF_COST); 5963 size(4); 5964 5965 format %{ "LDUW $mem,$dst\t! 
compressed ptr" %} 5966 ins_encode %{ 5967 __ lduw($mem$$Address, $dst$$Register); 5968 %} 5969 ins_pipe(iload_mem); 5970 %} 5971 5972 // Load Klass Pointer 5973 instruct loadKlass(iRegP dst, memory mem) %{ 5974 match(Set dst (LoadKlass mem)); 5975 ins_cost(MEMORY_REF_COST); 5976 size(4); 5977 5978 #ifndef _LP64 5979 format %{ "LDUW $mem,$dst\t! klass ptr" %} 5980 ins_encode %{ 5981 __ lduw($mem$$Address, $dst$$Register); 5982 %} 5983 #else 5984 format %{ "LDX $mem,$dst\t! klass ptr" %} 5985 ins_encode %{ 5986 __ ldx($mem$$Address, $dst$$Register); 5987 %} 5988 #endif 5989 ins_pipe(iload_mem); 5990 %} 5991 5992 // Load narrow Klass Pointer 5993 instruct loadNKlass(iRegN dst, memory mem) %{ 5994 match(Set dst (LoadNKlass mem)); 5995 ins_cost(MEMORY_REF_COST); 5996 size(4); 5997 5998 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %} 5999 ins_encode %{ 6000 __ lduw($mem$$Address, $dst$$Register); 6001 %} 6002 ins_pipe(iload_mem); 6003 %} 6004 6005 // Load Double 6006 instruct loadD(regD dst, memory mem) %{ 6007 match(Set dst (LoadD mem)); 6008 ins_cost(MEMORY_REF_COST); 6009 6010 size(4); 6011 format %{ "LDDF $mem,$dst" %} 6012 opcode(Assembler::lddf_op3); 6013 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6014 ins_pipe(floadD_mem); 6015 %} 6016 6017 // Load Double - UNaligned 6018 instruct loadD_unaligned(regD_low dst, memory mem ) %{ 6019 match(Set dst (LoadD_unaligned mem)); 6020 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 6021 size(8); 6022 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n" 6023 "\tLDF $mem+4,$dst.lo\t!" 
%} 6024 opcode(Assembler::ldf_op3); 6025 ins_encode( form3_mem_reg_double_unaligned( mem, dst )); 6026 ins_pipe(iload_mem); 6027 %} 6028 6029 // Load Float 6030 instruct loadF(regF dst, memory mem) %{ 6031 match(Set dst (LoadF mem)); 6032 ins_cost(MEMORY_REF_COST); 6033 6034 size(4); 6035 format %{ "LDF $mem,$dst" %} 6036 opcode(Assembler::ldf_op3); 6037 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6038 ins_pipe(floadF_mem); 6039 %} 6040 6041 // Load Constant 6042 instruct loadConI( iRegI dst, immI src ) %{ 6043 match(Set dst src); 6044 ins_cost(DEFAULT_COST * 3/2); 6045 format %{ "SET $src,$dst" %} 6046 ins_encode( Set32(src, dst) ); 6047 ins_pipe(ialu_hi_lo_reg); 6048 %} 6049 6050 instruct loadConI13( iRegI dst, immI13 src ) %{ 6051 match(Set dst src); 6052 6053 size(4); 6054 format %{ "MOV $src,$dst" %} 6055 ins_encode( Set13( src, dst ) ); 6056 ins_pipe(ialu_imm); 6057 %} 6058 6059 #ifndef _LP64 6060 instruct loadConP(iRegP dst, immP con) %{ 6061 match(Set dst con); 6062 ins_cost(DEFAULT_COST * 3/2); 6063 format %{ "SET $con,$dst\t!ptr" %} 6064 ins_encode %{ 6065 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc(); 6066 intptr_t val = $con$$constant; 6067 if (constant_reloc == relocInfo::oop_type) { 6068 __ set_oop_constant((jobject) val, $dst$$Register); 6069 } else if (constant_reloc == relocInfo::metadata_type) { 6070 __ set_metadata_constant((Metadata*)val, $dst$$Register); 6071 } else { // non-oop pointers, e.g. card mark base, heap top 6072 assert(constant_reloc == relocInfo::none, "unexpected reloc type"); 6073 __ set(val, $dst$$Register); 6074 } 6075 %} 6076 ins_pipe(loadConP); 6077 %} 6078 #else 6079 instruct loadConP_set(iRegP dst, immP_set con) %{ 6080 match(Set dst con); 6081 ins_cost(DEFAULT_COST * 3/2); 6082 format %{ "SET $con,$dst\t! 
ptr" %} 6083 ins_encode %{ 6084 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc(); 6085 intptr_t val = $con$$constant; 6086 if (constant_reloc == relocInfo::oop_type) { 6087 __ set_oop_constant((jobject) val, $dst$$Register); 6088 } else if (constant_reloc == relocInfo::metadata_type) { 6089 __ set_metadata_constant((Metadata*)val, $dst$$Register); 6090 } else { // non-oop pointers, e.g. card mark base, heap top 6091 assert(constant_reloc == relocInfo::none, "unexpected reloc type"); 6092 __ set(val, $dst$$Register); 6093 } 6094 %} 6095 ins_pipe(loadConP); 6096 %} 6097 6098 instruct loadConP_load(iRegP dst, immP_load con) %{ 6099 match(Set dst con); 6100 ins_cost(MEMORY_REF_COST); 6101 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %} 6102 ins_encode %{ 6103 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 6104 __ ld_ptr($constanttablebase, con_offset, $dst$$Register); 6105 %} 6106 ins_pipe(loadConP); 6107 %} 6108 6109 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{ 6110 match(Set dst con); 6111 ins_cost(DEFAULT_COST * 3/2); 6112 format %{ "SET $con,$dst\t! 
non-oop ptr" %} 6113 ins_encode %{ 6114 __ set($con$$constant, $dst$$Register); 6115 %} 6116 ins_pipe(loadConP); 6117 %} 6118 #endif // _LP64 6119 6120 instruct loadConP0(iRegP dst, immP0 src) %{ 6121 match(Set dst src); 6122 6123 size(4); 6124 format %{ "CLR $dst\t!ptr" %} 6125 ins_encode %{ 6126 __ clr($dst$$Register); 6127 %} 6128 ins_pipe(ialu_imm); 6129 %} 6130 6131 instruct loadConP_poll(iRegP dst, immP_poll src) %{ 6132 match(Set dst src); 6133 ins_cost(DEFAULT_COST); 6134 format %{ "SET $src,$dst\t!ptr" %} 6135 ins_encode %{ 6136 AddressLiteral polling_page(os::get_polling_page()); 6137 __ sethi(polling_page, reg_to_register_object($dst$$reg)); 6138 %} 6139 ins_pipe(loadConP_poll); 6140 %} 6141 6142 instruct loadConN0(iRegN dst, immN0 src) %{ 6143 match(Set dst src); 6144 6145 size(4); 6146 format %{ "CLR $dst\t! compressed NULL ptr" %} 6147 ins_encode %{ 6148 __ clr($dst$$Register); 6149 %} 6150 ins_pipe(ialu_imm); 6151 %} 6152 6153 instruct loadConN(iRegN dst, immN src) %{ 6154 match(Set dst src); 6155 ins_cost(DEFAULT_COST * 3/2); 6156 format %{ "SET $src,$dst\t! compressed ptr" %} 6157 ins_encode %{ 6158 Register dst = $dst$$Register; 6159 __ set_narrow_oop((jobject)$src$$constant, dst); 6160 %} 6161 ins_pipe(ialu_hi_lo_reg); 6162 %} 6163 6164 instruct loadConNKlass(iRegN dst, immNKlass src) %{ 6165 match(Set dst src); 6166 ins_cost(DEFAULT_COST * 3/2); 6167 format %{ "SET $src,$dst\t! compressed klass ptr" %} 6168 ins_encode %{ 6169 Register dst = $dst$$Register; 6170 __ set_narrow_klass((Klass*)$src$$constant, dst); 6171 %} 6172 ins_pipe(ialu_hi_lo_reg); 6173 %} 6174 6175 // Materialize long value (predicated by immL_cheap). 6176 instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{ 6177 match(Set dst con); 6178 effect(KILL tmp); 6179 ins_cost(DEFAULT_COST * 3); 6180 format %{ "SET64 $con,$dst KILL $tmp\t! 
cheap long" %} 6181 ins_encode %{ 6182 __ set64($con$$constant, $dst$$Register, $tmp$$Register); 6183 %} 6184 ins_pipe(loadConL); 6185 %} 6186 6187 // Load long value from constant table (predicated by immL_expensive). 6188 instruct loadConL_ldx(iRegL dst, immL_expensive con) %{ 6189 match(Set dst con); 6190 ins_cost(MEMORY_REF_COST); 6191 format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %} 6192 ins_encode %{ 6193 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 6194 __ ldx($constanttablebase, con_offset, $dst$$Register); 6195 %} 6196 ins_pipe(loadConL); 6197 %} 6198 6199 instruct loadConL0( iRegL dst, immL0 src ) %{ 6200 match(Set dst src); 6201 ins_cost(DEFAULT_COST); 6202 size(4); 6203 format %{ "CLR $dst\t! long" %} 6204 ins_encode( Set13( src, dst ) ); 6205 ins_pipe(ialu_imm); 6206 %} 6207 6208 instruct loadConL13( iRegL dst, immL13 src ) %{ 6209 match(Set dst src); 6210 ins_cost(DEFAULT_COST * 2); 6211 6212 size(4); 6213 format %{ "MOV $src,$dst\t! long" %} 6214 ins_encode( Set13( src, dst ) ); 6215 ins_pipe(ialu_imm); 6216 %} 6217 6218 instruct loadConF(regF dst, immF con, o7RegI tmp) %{ 6219 match(Set dst con); 6220 effect(KILL tmp); 6221 format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %} 6222 ins_encode %{ 6223 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 6224 __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister); 6225 %} 6226 ins_pipe(loadConFD); 6227 %} 6228 6229 instruct loadConD(regD dst, immD con, o7RegI tmp) %{ 6230 match(Set dst con); 6231 effect(KILL tmp); 6232 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %} 6233 ins_encode %{ 6234 // XXX This is a quick fix for 6833573. 
6235 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister); 6236 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 6237 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 6238 %} 6239 ins_pipe(loadConFD); 6240 %} 6241 6242 // Prefetch instructions. 6243 // Must be safe to execute with invalid address (cannot fault). 6244 6245 instruct prefetchr( memory mem ) %{ 6246 match( PrefetchRead mem ); 6247 ins_cost(MEMORY_REF_COST); 6248 size(4); 6249 6250 format %{ "PREFETCH $mem,0\t! Prefetch read-many" %} 6251 opcode(Assembler::prefetch_op3); 6252 ins_encode( form3_mem_prefetch_read( mem ) ); 6253 ins_pipe(iload_mem); 6254 %} 6255 6256 instruct prefetchw( memory mem ) %{ 6257 match( PrefetchWrite mem ); 6258 ins_cost(MEMORY_REF_COST); 6259 size(4); 6260 6261 format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %} 6262 opcode(Assembler::prefetch_op3); 6263 ins_encode( form3_mem_prefetch_write( mem ) ); 6264 ins_pipe(iload_mem); 6265 %} 6266 6267 // Prefetch instructions for allocation. 6268 6269 instruct prefetchAlloc( memory mem ) %{ 6270 predicate(AllocatePrefetchInstr == 0); 6271 match( PrefetchAllocation mem ); 6272 ins_cost(MEMORY_REF_COST); 6273 size(4); 6274 6275 format %{ "PREFETCH $mem,2\t! Prefetch allocation" %} 6276 opcode(Assembler::prefetch_op3); 6277 ins_encode( form3_mem_prefetch_write( mem ) ); 6278 ins_pipe(iload_mem); 6279 %} 6280 6281 // Use BIS instruction to prefetch for allocation. 6282 // Could fault, need space at the end of TLAB. 6283 instruct prefetchAlloc_bis( iRegP dst ) %{ 6284 predicate(AllocatePrefetchInstr == 1); 6285 match( PrefetchAllocation dst ); 6286 ins_cost(MEMORY_REF_COST); 6287 size(4); 6288 6289 format %{ "STXA [$dst]\t! 
// Prefetch allocation using BIS" %} 6290 ins_encode %{ 6291 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 6292 %} 6293 ins_pipe(istore_mem_reg); 6294 %} 6295 6296 // Next code is used for finding next cache line address to prefetch. 6297 #ifndef _LP64 6298 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{ 6299 match(Set dst (CastX2P (AndI (CastP2X src) mask))); 6300 ins_cost(DEFAULT_COST); 6301 size(4); 6302 6303 format %{ "AND $src,$mask,$dst\t! next cache line address" %} 6304 ins_encode %{ 6305 __ and3($src$$Register, $mask$$constant, $dst$$Register); 6306 %} 6307 ins_pipe(ialu_reg_imm); 6308 %} 6309 #else 6310 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{ 6311 match(Set dst (CastX2P (AndL (CastP2X src) mask))); 6312 ins_cost(DEFAULT_COST); 6313 size(4); 6314 6315 format %{ "AND $src,$mask,$dst\t! next cache line address" %} 6316 ins_encode %{ 6317 __ and3($src$$Register, $mask$$constant, $dst$$Register); 6318 %} 6319 ins_pipe(ialu_reg_imm); 6320 %} 6321 #endif 6322 6323 //----------Store Instructions------------------------------------------------- 6324 // Store Byte 6325 instruct storeB(memory mem, iRegI src) %{ 6326 match(Set mem (StoreB mem src)); 6327 ins_cost(MEMORY_REF_COST); 6328 6329 size(4); 6330 format %{ "STB $src,$mem\t! byte" %} 6331 opcode(Assembler::stb_op3); 6332 ins_encode(simple_form3_mem_reg( mem, src ) ); 6333 ins_pipe(istore_mem_reg); 6334 %} 6335 6336 instruct storeB0(memory mem, immI0 src) %{ 6337 match(Set mem (StoreB mem src)); 6338 ins_cost(MEMORY_REF_COST); 6339 6340 size(4); 6341 format %{ "STB $src,$mem\t! byte" %} 6342 opcode(Assembler::stb_op3); 6343 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6344 ins_pipe(istore_mem_zero); 6345 %} 6346 6347 instruct storeCM0(memory mem, immI0 src) %{ 6348 match(Set mem (StoreCM mem src)); 6349 ins_cost(MEMORY_REF_COST); 6350 6351 size(4); 6352 format %{ "STB $src,$mem\t! 
CMS card-mark byte 0" %} 6353 opcode(Assembler::stb_op3); 6354 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6355 ins_pipe(istore_mem_zero); 6356 %} 6357 6358 // Store Char/Short 6359 instruct storeC(memory mem, iRegI src) %{ 6360 match(Set mem (StoreC mem src)); 6361 ins_cost(MEMORY_REF_COST); 6362 6363 size(4); 6364 format %{ "STH $src,$mem\t! short" %} 6365 opcode(Assembler::sth_op3); 6366 ins_encode(simple_form3_mem_reg( mem, src ) ); 6367 ins_pipe(istore_mem_reg); 6368 %} 6369 6370 instruct storeC0(memory mem, immI0 src) %{ 6371 match(Set mem (StoreC mem src)); 6372 ins_cost(MEMORY_REF_COST); 6373 6374 size(4); 6375 format %{ "STH $src,$mem\t! short" %} 6376 opcode(Assembler::sth_op3); 6377 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6378 ins_pipe(istore_mem_zero); 6379 %} 6380 6381 // Store Integer 6382 instruct storeI(memory mem, iRegI src) %{ 6383 match(Set mem (StoreI mem src)); 6384 ins_cost(MEMORY_REF_COST); 6385 6386 size(4); 6387 format %{ "STW $src,$mem" %} 6388 opcode(Assembler::stw_op3); 6389 ins_encode(simple_form3_mem_reg( mem, src ) ); 6390 ins_pipe(istore_mem_reg); 6391 %} 6392 6393 // Store Long 6394 instruct storeL(memory mem, iRegL src) %{ 6395 match(Set mem (StoreL mem src)); 6396 ins_cost(MEMORY_REF_COST); 6397 size(4); 6398 format %{ "STX $src,$mem\t! 
long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store int zero to memory; %g0 (R_G0) supplies the zero data source.
instruct storeI0(memory mem, immI0 src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store long zero to memory (64-bit STX of %g0).
instruct storeL0(memory mem, immL0 src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer from float register (used after fstoi)
instruct storeI_Freg(memory mem, regF src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store Pointer.  STW on 32-bit builds, STX on 64-bit builds.
instruct storeP(memory dst, sp_ptr_RegP src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_spORreg);
%}

// Store null pointer; %g0 supplies the zero data source.
instruct storeP0(memory dst, immP0 src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Compressed Pointer (32-bit STW of the narrow oop).
instruct storeN(memory dst, iRegN src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    // reg+reg form when an index register is present, else reg+disp
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

instruct storeNKlass(memory dst, iRegN src) %{
  match(Set dst (StoreNKlass dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

// Store compressed null; immediate zero is stored directly.
instruct storeN0(memory dst, immN0 src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    if (index != G0) {
      __ stw(0, base, index);
    } else {
      __ stw(0, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_zero);
%}

// Store Double
instruct storeD( memory mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STDF $src,$mem" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreD_mem_reg);
%}

// Store double zero: a single 64-bit STX of %g0 covers both words.
instruct storeD0( memory mem, immD0 src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreD_mem_zero);
%}

// Store Float
instruct storeF( memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STF $src,$mem" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store float zero via an integer STW of %g0 (+0.0f has an all-zero bit pattern).
instruct storeF0( memory mem, immF0 src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem\t! storeF0" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreF_mem_zero);
%}

// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegN dst, iRegP src) %{
  // Only when the oop may be null; the not-null variant handles the rest.
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ encode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct
decodeKlass_not_null(iRegP dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ decode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-acquire" %}
  ins_encode( enc_membar_acquire );
  ins_pipe(long_memory_op);
%}

// Lock-acquire barrier is free: the preceding FastLock CAS already orders.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-release" %}
  ins_encode( enc_membar_release );
  ins_pipe(long_memory_op);
%}

// Lock-release barrier is free: the succeeding FastUnlock CAS orders.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-volatile" %}
  ins_encode( enc_membar_volatile );
  ins_pipe(long_memory_op);
%}

// Elide the volatile barrier when a following load already provides the
// StoreLoad ordering (see Matcher::post_store_load_barrier).
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-storestore (empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

//----------Register Move Instructions-----------------------------------------
instruct roundDouble_nop(regD dst) %{
  match(Set dst (RoundDouble dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}


instruct roundFloat_nop(regF dst) %{
  match(Set dst (RoundFloat dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}


// Cast Index to Pointer for unsafe natives
instruct castX2P(iRegX src, iRegP dst) %{
  match(Set dst (CastX2P src));

  format %{ "MOV $src,$dst\t! IntX->Ptr" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

// Cast Pointer to Index for unsafe natives
instruct castP2X(iRegP src, iRegX dst) %{
  match(Set dst (CastP2X src));

  format %{ "MOV $src,$dst\t! Ptr->IntX" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

// Spill a double register to a stack slot.
instruct stfSSD(stackSlotD stkSlot, regD src) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
  match(Set stkSlot src);   // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "STDF $src,$stkSlot\t!stk" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, src));
  ins_pipe(fstoreD_stk_reg);
%}

// Reload a double register from a stack slot.
instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
match(Set dst stkSlot);   // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "LDDF $stkSlot,$dst\t!stk" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, dst));
  ins_pipe(floadD_stk);
%}

// Spill a single-precision float register to a stack slot.
instruct stfSSF(stackSlotF stkSlot, regF src) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
  match(Set stkSlot src);   // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "STF $src,$stkSlot\t!stk" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, src));
  ins_pipe(fstoreF_stk_reg);
%}

//----------Conditional Move---------------------------------------------------
// Conditional move
instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}

instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_imm);
%}

// Conditional move for RegN. Only cmov(reg,reg).
instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

// This instruction also works with CmpN so we don't need cmovNN_reg.
instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

// This instruction also works with CmpN so we don't need cmovNN_reg.
instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $icc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}

// Conditional move
instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_imm);
%}

// This instruction also works with CmpN so we don't need cmovPN_reg.
instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  // NOTE(review): reg-reg variant is scheduled on ialu_imm while sibling
  // *_reg variants use ialu_reg -- possibly copied from cmovPF_imm; verify.
  ins_pipe(ialu_imm);
%}

instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_imm);
%}

// Conditional move
instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVD$cmp $pcc,$src,$dst" %}
  ins_encode(
enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVS$cmp $icc,$src,$dst" %}
  opcode(0x101);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVS$cmp $icc,$src,$dst" %}
  opcode(0x101);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_float_move);
%}

// Conditional move,
instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FMOVF$cmp $fcc,$src,$dst" %}
  opcode(0x1);
  ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move
instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  opcode(0x102);
  format %{ "FMOVD$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVD$cmp $icc,$src,$dst" %}
  opcode(0x102);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVD$cmp $icc,$src,$dst" %}
  opcode(0x102);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move,
instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FMOVD$cmp $fcc,$src,$dst" %}
  opcode(0x2);
  ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move
instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}


instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}


instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}



//----------OS and Locking Instructions----------------------------------------

// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
instruct tlsLoadP(g2RegP dst) %{
  match(Set dst (ThreadLocal));

  size(0);
  ins_cost(0);
  format %{ "# TLS is in G2" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}

// Type-narrowing no-op: same register, no code emitted.
instruct checkCastPP( iRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}


instruct castPP( iRegP dst ) %{
  match(Set dst (CastPP dst));
  format %{ "# castPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}

instruct castII( iRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "# castII of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe(empty);
%}

//----------Arithmetic Instructions--------------------------------------------
// Addition Instructions
// Register Addition
instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  ins_encode %{
    __ add($src1$$Register, $src2$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate Addition
instruct
addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Pointer Register Addition
instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Pointer Immediate Addition
instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst\t! long" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (AddL src1 con));

  size(4);
  format %{ "ADD $src1,$con,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

//----------Conditional_store--------------------------------------------------
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success. Implemented with a CASA on Sparc.

// LoadP-locked. Same as a regular pointer load when used with a compare-swap
instruct loadPLocked(iRegP dst, memory mem) %{
  match(Set dst (LoadPLocked mem));
  ins_cost(MEMORY_REF_COST);

#ifndef _LP64
  size(4);
  format %{ "LDUW $mem,$dst\t! ptr" %}
  opcode(Assembler::lduw_op3, 0, REGP_OP);
#else
  format %{ "LDX $mem,$dst\t! ptr" %}
  opcode(Assembler::ldx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( mem, dst ) );
  ins_pipe(iload_mem);
%}

// CAS on the heap-top pointer; newval (G3) is clobbered by the CASA result.
instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
  match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
            "CMP R_G3,$oldval\t\t! See if we made progress" %}
  ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// Conditional-store of an int value.
instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
  match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
            "CMP $oldval,$newval\t\t! See if we made progress" %}
  ins_encode( enc_cas(mem_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// Conditional-store of a long value.
instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
  match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
            "CMP $oldval,$newval\t\t! See if we made progress" %}
  ins_encode( enc_cas(mem_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// No flag versions for CompareAndSwap{P,I,L} because matcher can't match them

// 64-bit CAS; requires cx8 support.  O7 (tmp1) is used as scratch.
instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  predicate(VM_Version::supports_cx8());
  match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne xcc,R_G0,$res"
  %}
  ins_encode( enc_casx(mem_ptr, oldval, newval),
              enc_lflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}


instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne icc,R_G0,$res"
  %}
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}

// Pointer CAS: 64-bit CASXA on _LP64 builds, 32-bit CASA otherwise.
instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
#ifdef _LP64
  predicate(VM_Version::supports_cx8());
#endif
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne xcc,R_G0,$res"
  %}
#ifdef _LP64
  ins_encode( enc_casx(mem_ptr, oldval, newval),
              enc_lflags_ne_to_boolean(res) );
#else
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
#endif
  ins_pipe( long_memory_op );
%}

instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne icc,R_G0,$res"
  %}
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}

// Atomic get-and-set via the SWAP instruction.
instruct xchgI( memory mem, iRegI newval) %{
  match(Set newval (GetAndSetI mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}

#ifndef _LP64
// 32-bit pointers fit in a SWAP; 64-bit builds have no pointer xchg here.
instruct xchgP( memory mem, iRegP newval) %{
  match(Set newval (GetAndSetP mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}
#endif

instruct xchgN( memory mem, iRegN newval) %{
  match(Set newval (GetAndSetN mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}

//---------------------
7364 // Subtraction Instructions 7365 // Register Subtraction 7366 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7367 match(Set dst (SubI src1 src2)); 7368 7369 size(4); 7370 format %{ "SUB $src1,$src2,$dst" %} 7371 opcode(Assembler::sub_op3, Assembler::arith_op); 7372 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7373 ins_pipe(ialu_reg_reg); 7374 %} 7375 7376 // Immediate Subtraction 7377 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7378 match(Set dst (SubI src1 src2)); 7379 7380 size(4); 7381 format %{ "SUB $src1,$src2,$dst" %} 7382 opcode(Assembler::sub_op3, Assembler::arith_op); 7383 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7384 ins_pipe(ialu_reg_imm); 7385 %} 7386 7387 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 7388 match(Set dst (SubI zero src2)); 7389 7390 size(4); 7391 format %{ "NEG $src2,$dst" %} 7392 opcode(Assembler::sub_op3, Assembler::arith_op); 7393 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7394 ins_pipe(ialu_zero_reg); 7395 %} 7396 7397 // Long subtraction 7398 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7399 match(Set dst (SubL src1 src2)); 7400 7401 size(4); 7402 format %{ "SUB $src1,$src2,$dst\t! long" %} 7403 opcode(Assembler::sub_op3, Assembler::arith_op); 7404 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7405 ins_pipe(ialu_reg_reg); 7406 %} 7407 7408 // Immediate Subtraction 7409 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7410 match(Set dst (SubL src1 con)); 7411 7412 size(4); 7413 format %{ "SUB $src1,$con,$dst\t! long" %} 7414 opcode(Assembler::sub_op3, Assembler::arith_op); 7415 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7416 ins_pipe(ialu_reg_imm); 7417 %} 7418 7419 // Long negation 7420 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{ 7421 match(Set dst (SubL zero src2)); 7422 7423 size(4); 7424 format %{ "NEG $src2,$dst\t! 
long" %} 7425 opcode(Assembler::sub_op3, Assembler::arith_op); 7426 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7427 ins_pipe(ialu_zero_reg); 7428 %} 7429 7430 // Multiplication Instructions 7431 // Integer Multiplication 7432 // Register Multiplication 7433 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7434 match(Set dst (MulI src1 src2)); 7435 7436 size(4); 7437 format %{ "MULX $src1,$src2,$dst" %} 7438 opcode(Assembler::mulx_op3, Assembler::arith_op); 7439 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7440 ins_pipe(imul_reg_reg); 7441 %} 7442 7443 // Immediate Multiplication 7444 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7445 match(Set dst (MulI src1 src2)); 7446 7447 size(4); 7448 format %{ "MULX $src1,$src2,$dst" %} 7449 opcode(Assembler::mulx_op3, Assembler::arith_op); 7450 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7451 ins_pipe(imul_reg_imm); 7452 %} 7453 7454 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7455 match(Set dst (MulL src1 src2)); 7456 ins_cost(DEFAULT_COST * 5); 7457 size(4); 7458 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7459 opcode(Assembler::mulx_op3, Assembler::arith_op); 7460 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7461 ins_pipe(mulL_reg_reg); 7462 %} 7463 7464 // Immediate Multiplication 7465 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7466 match(Set dst (MulL src1 src2)); 7467 ins_cost(DEFAULT_COST * 5); 7468 size(4); 7469 format %{ "MULX $src1,$src2,$dst" %} 7470 opcode(Assembler::mulx_op3, Assembler::arith_op); 7471 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7472 ins_pipe(mulL_reg_imm); 7473 %} 7474 7475 // Integer Division 7476 // Register Division 7477 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{ 7478 match(Set dst (DivI src1 src2)); 7479 ins_cost((2+71)*DEFAULT_COST); 7480 7481 format %{ "SRA $src2,0,$src2\n\t" 7482 "SRA $src1,0,$src1\n\t" 7483 "SDIVX $src1,$src2,$dst" %} 7484 ins_encode( idiv_reg( src1, src2, dst ) ); 7485 ins_pipe(sdiv_reg_reg); 7486 %} 7487 7488 // Immediate Division 7489 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{ 7490 match(Set dst (DivI src1 src2)); 7491 ins_cost((2+71)*DEFAULT_COST); 7492 7493 format %{ "SRA $src1,0,$src1\n\t" 7494 "SDIVX $src1,$src2,$dst" %} 7495 ins_encode( idiv_imm( src1, src2, dst ) ); 7496 ins_pipe(sdiv_reg_imm); 7497 %} 7498 7499 //----------Div-By-10-Expansion------------------------------------------------ 7500 // Extract hi bits of a 32x32->64 bit multiply. 7501 // Expand rule only, not matched 7502 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{ 7503 effect( DEF dst, USE src1, USE src2 ); 7504 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t" 7505 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %} 7506 ins_encode( enc_mul_hi(dst,src1,src2)); 7507 ins_pipe(sdiv_reg_reg); 7508 %} 7509 7510 // Magic constant, reciprocal of 10 7511 instruct loadConI_x66666667(iRegIsafe dst) %{ 7512 effect( DEF dst ); 7513 7514 size(8); 7515 format %{ "SET 0x66666667,$dst\t! 
Used in div-by-10" %}
  ins_encode( Set32(0x66666667, dst) );
  ins_pipe(ialu_hi_lo_reg);
%}

// Extract the sign of the dividend: arithmetic shift right by 31.
// Expand-only helper for the div-by-10 sequence below.
instruct sra_31( iRegI dst, iRegI src ) %{
  effect( DEF dst, USE src );
  format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
  ins_encode( form3_rs1_rd_copysign_hi(src,dst) );
  ins_pipe(ialu_reg_reg);
%}

// Arithmetic shift right by the immediate constant 2.
// Expand-only helper for the div-by-10 sequence below.
instruct sra_reg_2( iRegI dst, iRegI src ) %{
  effect( DEF dst, USE src );
  format %{ "SRA $src,2,$dst\t! Used in div-by-10" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Integer DIV with 10: strength-reduced to a multiply by the magic
// reciprocal constant plus shifts and a subtract -- no divide issued.
instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{
  match(Set dst (DivI src div));
  ins_cost((6+6)*DEFAULT_COST);
  expand %{
    iRegIsafe tmp1;               // Killed temps;
    iRegIsafe tmp2;               // Killed temps;
    iRegI tmp3;                   // Killed temps;
    iRegI tmp4;                   // Killed temps;
    loadConI_x66666667( tmp1 );   // SET 0x66666667 -> tmp1
    mul_hi( tmp2, src, tmp1 );    // MUL hibits(src * tmp1) -> tmp2
    sra_31( tmp3, src );          // SRA src,31 -> tmp3
    sra_reg_2( tmp4, tmp2 );      // SRA tmp2,2 -> tmp4
    subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst
  %}
%}

// Register Long Division
instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(DEFAULT_COST*71);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_reg);
%}

// Immediate Long Division
instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(DEFAULT_COST*71);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t!
long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_imm);
%}

// Integer Remainder
// Register Remainder
instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{
  match(Set dst (ModI src1 src2));
  effect( KILL ccr, KILL temp);

  format %{ "SREM $src1,$src2,$dst" %}
  ins_encode( irem_reg(src1, src2, dst, temp) );
  ins_pipe(sdiv_reg_reg);
%}

// Immediate Remainder
instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{
  match(Set dst (ModI src1 src2));
  effect( KILL ccr, KILL temp);

  format %{ "SREM $src1,$src2,$dst" %}
  ins_encode( irem_imm(src1, src2, dst, temp) );
  ins_pipe(sdiv_reg_imm);
%}

// Long division helper: expand-only (no match rule), used by the
// modL_reg_reg remainder expansion below.
instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_reg);
%}

// Long division helper, immediate divisor: expand-only, used by the
// modL_reg_imm13 remainder expansion below.
instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "SDIVX $src1,$src2,$dst\t! long" %}
  opcode(Assembler::sdivx_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(divL_reg_imm);
%}

// Long multiply helper: expand-only, used by the modL_reg_reg
// remainder expansion below.
instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "MULX $src1,$src2,$dst\t!
long" %} 7621 opcode(Assembler::mulx_op3, Assembler::arith_op); 7622 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7623 ins_pipe(mulL_reg_reg); 7624 %} 7625 7626 // Immediate Multiplication 7627 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7628 effect(DEF dst, USE src1, USE src2); 7629 size(4); 7630 format %{ "MULX $src1,$src2,$dst" %} 7631 opcode(Assembler::mulx_op3, Assembler::arith_op); 7632 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7633 ins_pipe(mulL_reg_imm); 7634 %} 7635 7636 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7637 effect(DEF dst, USE src1, USE src2); 7638 size(4); 7639 format %{ "SUB $src1,$src2,$dst\t! long" %} 7640 opcode(Assembler::sub_op3, Assembler::arith_op); 7641 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7642 ins_pipe(ialu_reg_reg); 7643 %} 7644 7645 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 7646 effect(DEF dst, USE src1, USE src2); 7647 size(4); 7648 format %{ "SUB $src1,$src2,$dst\t! 
long" %} 7649 opcode(Assembler::sub_op3, Assembler::arith_op); 7650 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7651 ins_pipe(ialu_reg_reg); 7652 %} 7653 7654 // Register Long Remainder 7655 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7656 match(Set dst (ModL src1 src2)); 7657 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7658 expand %{ 7659 iRegL tmp1; 7660 iRegL tmp2; 7661 divL_reg_reg_1(tmp1, src1, src2); 7662 mulL_reg_reg_1(tmp2, tmp1, src2); 7663 subL_reg_reg_1(dst, src1, tmp2); 7664 %} 7665 %} 7666 7667 // Register Long Remainder 7668 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7669 match(Set dst (ModL src1 src2)); 7670 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7671 expand %{ 7672 iRegL tmp1; 7673 iRegL tmp2; 7674 divL_reg_imm13_1(tmp1, src1, src2); 7675 mulL_reg_imm13_1(tmp2, tmp1, src2); 7676 subL_reg_reg_2 (dst, src1, tmp2); 7677 %} 7678 %} 7679 7680 // Integer Shift Instructions 7681 // Register Shift Left 7682 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7683 match(Set dst (LShiftI src1 src2)); 7684 7685 size(4); 7686 format %{ "SLL $src1,$src2,$dst" %} 7687 opcode(Assembler::sll_op3, Assembler::arith_op); 7688 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7689 ins_pipe(ialu_reg_reg); 7690 %} 7691 7692 // Register Shift Left Immediate 7693 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7694 match(Set dst (LShiftI src1 src2)); 7695 7696 size(4); 7697 format %{ "SLL $src1,$src2,$dst" %} 7698 opcode(Assembler::sll_op3, Assembler::arith_op); 7699 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7700 ins_pipe(ialu_reg_imm); 7701 %} 7702 7703 // Register Shift Left 7704 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7705 match(Set dst (LShiftL src1 src2)); 7706 7707 size(4); 7708 format %{ "SLLX $src1,$src2,$dst" %} 7709 opcode(Assembler::sllx_op3, Assembler::arith_op); 7710 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7711 ins_pipe(ialu_reg_reg); 7712 %} 7713 7714 // 
Register Shift Left Immediate
instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
  match(Set dst (LShiftL src1 src2));

  size(4);
  format %{ "SLLX $src1,$src2,$dst" %}
  opcode(Assembler::sllx_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Arithmetic Shift Right
instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (RShiftI src1 src2));
  size(4);
  format %{ "SRA $src1,$src2,$dst" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Register Arithmetic Shift Right Immediate
instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (RShiftI src1 src2));

  size(4);
  format %{ "SRA $src1,$src2,$dst" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Shift Right Arithmetic Long
instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (RShiftL src1 src2));

  size(4);
  format %{ "SRAX $src1,$src2,$dst" %}
  opcode(Assembler::srax_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Register Arithmetic Shift Right Long Immediate
instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
  match(Set dst (RShiftL src1 src2));

  size(4);
  format %{ "SRAX $src1,$src2,$dst" %}
  opcode(Assembler::srax_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Shift Right
instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (URShiftI src1 src2));

  size(4);
  format %{ "SRL
$src1,$src2,$dst" %} 7774 opcode(Assembler::srl_op3, Assembler::arith_op); 7775 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7776 ins_pipe(ialu_reg_reg); 7777 %} 7778 7779 // Register Shift Right Immediate 7780 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7781 match(Set dst (URShiftI src1 src2)); 7782 7783 size(4); 7784 format %{ "SRL $src1,$src2,$dst" %} 7785 opcode(Assembler::srl_op3, Assembler::arith_op); 7786 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7787 ins_pipe(ialu_reg_imm); 7788 %} 7789 7790 // Register Shift Right 7791 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7792 match(Set dst (URShiftL src1 src2)); 7793 7794 size(4); 7795 format %{ "SRLX $src1,$src2,$dst" %} 7796 opcode(Assembler::srlx_op3, Assembler::arith_op); 7797 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7798 ins_pipe(ialu_reg_reg); 7799 %} 7800 7801 // Register Shift Right Immediate 7802 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7803 match(Set dst (URShiftL src1 src2)); 7804 7805 size(4); 7806 format %{ "SRLX $src1,$src2,$dst" %} 7807 opcode(Assembler::srlx_op3, Assembler::arith_op); 7808 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7809 ins_pipe(ialu_reg_imm); 7810 %} 7811 7812 // Register Shift Right Immediate with a CastP2X 7813 #ifdef _LP64 7814 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ 7815 match(Set dst (URShiftL (CastP2X src1) src2)); 7816 size(4); 7817 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %} 7818 opcode(Assembler::srlx_op3, Assembler::arith_op); 7819 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7820 ins_pipe(ialu_reg_imm); 7821 %} 7822 #else 7823 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{ 7824 match(Set dst (URShiftI (CastP2X src1) src2)); 7825 size(4); 7826 format %{ "SRL $src1,$src2,$dst\t! 
Cast ptr $src1 to int and shift" %} 7827 opcode(Assembler::srl_op3, Assembler::arith_op); 7828 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7829 ins_pipe(ialu_reg_imm); 7830 %} 7831 #endif 7832 7833 7834 //----------Floating Point Arithmetic Instructions----------------------------- 7835 7836 // Add float single precision 7837 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 7838 match(Set dst (AddF src1 src2)); 7839 7840 size(4); 7841 format %{ "FADDS $src1,$src2,$dst" %} 7842 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf); 7843 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7844 ins_pipe(faddF_reg_reg); 7845 %} 7846 7847 // Add float double precision 7848 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 7849 match(Set dst (AddD src1 src2)); 7850 7851 size(4); 7852 format %{ "FADDD $src1,$src2,$dst" %} 7853 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 7854 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7855 ins_pipe(faddD_reg_reg); 7856 %} 7857 7858 // Sub float single precision 7859 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 7860 match(Set dst (SubF src1 src2)); 7861 7862 size(4); 7863 format %{ "FSUBS $src1,$src2,$dst" %} 7864 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf); 7865 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7866 ins_pipe(faddF_reg_reg); 7867 %} 7868 7869 // Sub float double precision 7870 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 7871 match(Set dst (SubD src1 src2)); 7872 7873 size(4); 7874 format %{ "FSUBD $src1,$src2,$dst" %} 7875 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 7876 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7877 ins_pipe(faddD_reg_reg); 7878 %} 7879 7880 // Mul float single precision 7881 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 7882 match(Set dst (MulF src1 src2)); 7883 7884 size(4); 7885 format %{ "FMULS $src1,$src2,$dst" 
%} 7886 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf); 7887 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7888 ins_pipe(fmulF_reg_reg); 7889 %} 7890 7891 // Mul float double precision 7892 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 7893 match(Set dst (MulD src1 src2)); 7894 7895 size(4); 7896 format %{ "FMULD $src1,$src2,$dst" %} 7897 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 7898 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7899 ins_pipe(fmulD_reg_reg); 7900 %} 7901 7902 // Div float single precision 7903 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 7904 match(Set dst (DivF src1 src2)); 7905 7906 size(4); 7907 format %{ "FDIVS $src1,$src2,$dst" %} 7908 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf); 7909 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7910 ins_pipe(fdivF_reg_reg); 7911 %} 7912 7913 // Div float double precision 7914 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 7915 match(Set dst (DivD src1 src2)); 7916 7917 size(4); 7918 format %{ "FDIVD $src1,$src2,$dst" %} 7919 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf); 7920 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7921 ins_pipe(fdivD_reg_reg); 7922 %} 7923 7924 // Absolute float double precision 7925 instruct absD_reg(regD dst, regD src) %{ 7926 match(Set dst (AbsD src)); 7927 7928 format %{ "FABSd $src,$dst" %} 7929 ins_encode(fabsd(dst, src)); 7930 ins_pipe(faddD_reg); 7931 %} 7932 7933 // Absolute float single precision 7934 instruct absF_reg(regF dst, regF src) %{ 7935 match(Set dst (AbsF src)); 7936 7937 format %{ "FABSs $src,$dst" %} 7938 ins_encode(fabss(dst, src)); 7939 ins_pipe(faddF_reg); 7940 %} 7941 7942 instruct negF_reg(regF dst, regF src) %{ 7943 match(Set dst (NegF src)); 7944 7945 size(4); 7946 format %{ "FNEGs $src,$dst" %} 7947 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); 7948 
ins_encode(form3_opf_rs2F_rdF(src, dst)); 7949 ins_pipe(faddF_reg); 7950 %} 7951 7952 instruct negD_reg(regD dst, regD src) %{ 7953 match(Set dst (NegD src)); 7954 7955 format %{ "FNEGd $src,$dst" %} 7956 ins_encode(fnegd(dst, src)); 7957 ins_pipe(faddD_reg); 7958 %} 7959 7960 // Sqrt float double precision 7961 instruct sqrtF_reg_reg(regF dst, regF src) %{ 7962 match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); 7963 7964 size(4); 7965 format %{ "FSQRTS $src,$dst" %} 7966 ins_encode(fsqrts(dst, src)); 7967 ins_pipe(fdivF_reg_reg); 7968 %} 7969 7970 // Sqrt float double precision 7971 instruct sqrtD_reg_reg(regD dst, regD src) %{ 7972 match(Set dst (SqrtD src)); 7973 7974 size(4); 7975 format %{ "FSQRTD $src,$dst" %} 7976 ins_encode(fsqrtd(dst, src)); 7977 ins_pipe(fdivD_reg_reg); 7978 %} 7979 7980 //----------Logical Instructions----------------------------------------------- 7981 // And Instructions 7982 // Register And 7983 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7984 match(Set dst (AndI src1 src2)); 7985 7986 size(4); 7987 format %{ "AND $src1,$src2,$dst" %} 7988 opcode(Assembler::and_op3, Assembler::arith_op); 7989 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7990 ins_pipe(ialu_reg_reg); 7991 %} 7992 7993 // Immediate And 7994 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7995 match(Set dst (AndI src1 src2)); 7996 7997 size(4); 7998 format %{ "AND $src1,$src2,$dst" %} 7999 opcode(Assembler::and_op3, Assembler::arith_op); 8000 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8001 ins_pipe(ialu_reg_imm); 8002 %} 8003 8004 // Register And Long 8005 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8006 match(Set dst (AndL src1 src2)); 8007 8008 ins_cost(DEFAULT_COST); 8009 size(4); 8010 format %{ "AND $src1,$src2,$dst\t! 
long" %}
  opcode(Assembler::and_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate And Long
instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (AndL src1 con));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "AND $src1,$con,$dst\t! long" %}
  opcode(Assembler::and_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Or Instructions
// Register Or
instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "OR $src1,$src2,$dst" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Or
instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "OR $src1,$src2,$dst" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Or Long
instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "OR $src1,$src2,$dst\t! long" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Or Long
instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (OrL src1 con));

  // Removed a stray duplicate ins_cost(DEFAULT_COST*2) that was
  // immediately overridden; cost now matches the andL/xorL siblings.
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "OR $src1,$con,$dst\t!
long" %} 8069 opcode(Assembler::or_op3, Assembler::arith_op); 8070 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8071 ins_pipe(ialu_reg_imm); 8072 %} 8073 8074 #ifndef _LP64 8075 8076 // Use sp_ptr_RegP to match G2 (TLS register) without spilling. 8077 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{ 8078 match(Set dst (OrI src1 (CastP2X src2))); 8079 8080 size(4); 8081 format %{ "OR $src1,$src2,$dst" %} 8082 opcode(Assembler::or_op3, Assembler::arith_op); 8083 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8084 ins_pipe(ialu_reg_reg); 8085 %} 8086 8087 #else 8088 8089 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ 8090 match(Set dst (OrL src1 (CastP2X src2))); 8091 8092 ins_cost(DEFAULT_COST); 8093 size(4); 8094 format %{ "OR $src1,$src2,$dst\t! long" %} 8095 opcode(Assembler::or_op3, Assembler::arith_op); 8096 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8097 ins_pipe(ialu_reg_reg); 8098 %} 8099 8100 #endif 8101 8102 // Xor Instructions 8103 // Register Xor 8104 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8105 match(Set dst (XorI src1 src2)); 8106 8107 size(4); 8108 format %{ "XOR $src1,$src2,$dst" %} 8109 opcode(Assembler::xor_op3, Assembler::arith_op); 8110 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8111 ins_pipe(ialu_reg_reg); 8112 %} 8113 8114 // Immediate Xor 8115 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8116 match(Set dst (XorI src1 src2)); 8117 8118 size(4); 8119 format %{ "XOR $src1,$src2,$dst" %} 8120 opcode(Assembler::xor_op3, Assembler::arith_op); 8121 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8122 ins_pipe(ialu_reg_imm); 8123 %} 8124 8125 // Register Xor Long 8126 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8127 match(Set dst (XorL src1 src2)); 8128 8129 ins_cost(DEFAULT_COST); 8130 size(4); 8131 format %{ "XOR $src1,$src2,$dst\t! 
long" %} 8132 opcode(Assembler::xor_op3, Assembler::arith_op); 8133 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8134 ins_pipe(ialu_reg_reg); 8135 %} 8136 8137 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8138 match(Set dst (XorL src1 con)); 8139 8140 ins_cost(DEFAULT_COST); 8141 size(4); 8142 format %{ "XOR $src1,$con,$dst\t! long" %} 8143 opcode(Assembler::xor_op3, Assembler::arith_op); 8144 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8145 ins_pipe(ialu_reg_imm); 8146 %} 8147 8148 //----------Convert to Boolean------------------------------------------------- 8149 // Nice hack for 32-bit tests but doesn't work for 8150 // 64-bit pointers. 8151 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{ 8152 match(Set dst (Conv2B src)); 8153 effect( KILL ccr ); 8154 ins_cost(DEFAULT_COST*2); 8155 format %{ "CMP R_G0,$src\n\t" 8156 "ADDX R_G0,0,$dst" %} 8157 ins_encode( enc_to_bool( src, dst ) ); 8158 ins_pipe(ialu_reg_ialu); 8159 %} 8160 8161 #ifndef _LP64 8162 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{ 8163 match(Set dst (Conv2B src)); 8164 effect( KILL ccr ); 8165 ins_cost(DEFAULT_COST*2); 8166 format %{ "CMP R_G0,$src\n\t" 8167 "ADDX R_G0,0,$dst" %} 8168 ins_encode( enc_to_bool( src, dst ) ); 8169 ins_pipe(ialu_reg_ialu); 8170 %} 8171 #else 8172 instruct convP2B( iRegI dst, iRegP src ) %{ 8173 match(Set dst (Conv2B src)); 8174 ins_cost(DEFAULT_COST*2); 8175 format %{ "MOV $src,$dst\n\t" 8176 "MOVRNZ $src,1,$dst" %} 8177 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); 8178 ins_pipe(ialu_clr_and_mover); 8179 %} 8180 #endif 8181 8182 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ 8183 match(Set dst (CmpLTMask src zero)); 8184 effect(KILL ccr); 8185 size(4); 8186 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %} 8187 ins_encode %{ 8188 __ sra($src$$Register, 31, $dst$$Register); 8189 %} 8190 ins_pipe(ialu_reg_imm); 8191 %} 8192 8193 instruct cmpLTMask_reg_reg( iRegI dst, 
iRegI p, iRegI q, flagsReg ccr ) %{ 8194 match(Set dst (CmpLTMask p q)); 8195 effect( KILL ccr ); 8196 ins_cost(DEFAULT_COST*4); 8197 format %{ "CMP $p,$q\n\t" 8198 "MOV #0,$dst\n\t" 8199 "BLT,a .+8\n\t" 8200 "MOV #-1,$dst" %} 8201 ins_encode( enc_ltmask(p,q,dst) ); 8202 ins_pipe(ialu_reg_reg_ialu); 8203 %} 8204 8205 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ 8206 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 8207 effect(KILL ccr, TEMP tmp); 8208 ins_cost(DEFAULT_COST*3); 8209 8210 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" 8211 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" 8212 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} 8213 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); 8214 ins_pipe(cadd_cmpltmask); 8215 %} 8216 8217 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ 8218 match(Set p (AndI (CmpLTMask p q) y)); 8219 effect(KILL ccr); 8220 ins_cost(DEFAULT_COST*3); 8221 8222 format %{ "CMP $p,$q\n\t" 8223 "MOV $y,$p\n\t" 8224 "MOVge G0,$p" %} 8225 ins_encode %{ 8226 __ cmp($p$$Register, $q$$Register); 8227 __ mov($y$$Register, $p$$Register); 8228 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); 8229 %} 8230 ins_pipe(ialu_reg_reg_ialu); 8231 %} 8232 8233 //----------------------------------------------------------------- 8234 // Direct raw moves between float and general registers using VIS3. 8235 8236 // ins_pipe(faddF_reg); 8237 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{ 8238 predicate(UseVIS >= 3); 8239 match(Set dst (MoveF2I src)); 8240 8241 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %} 8242 ins_encode %{ 8243 __ movstouw($src$$FloatRegister, $dst$$Register); 8244 %} 8245 ins_pipe(ialu_reg_reg); 8246 %} 8247 8248 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{ 8249 predicate(UseVIS >= 3); 8250 match(Set dst (MoveI2F src)); 8251 8252 format %{ "MOVWTOS $src,$dst\t! 
MoveI2F" %} 8253 ins_encode %{ 8254 __ movwtos($src$$Register, $dst$$FloatRegister); 8255 %} 8256 ins_pipe(ialu_reg_reg); 8257 %} 8258 8259 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{ 8260 predicate(UseVIS >= 3); 8261 match(Set dst (MoveD2L src)); 8262 8263 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %} 8264 ins_encode %{ 8265 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register); 8266 %} 8267 ins_pipe(ialu_reg_reg); 8268 %} 8269 8270 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{ 8271 predicate(UseVIS >= 3); 8272 match(Set dst (MoveL2D src)); 8273 8274 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %} 8275 ins_encode %{ 8276 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg)); 8277 %} 8278 ins_pipe(ialu_reg_reg); 8279 %} 8280 8281 8282 // Raw moves between float and general registers using stack. 8283 8284 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ 8285 match(Set dst (MoveF2I src)); 8286 effect(DEF dst, USE src); 8287 ins_cost(MEMORY_REF_COST); 8288 8289 size(4); 8290 format %{ "LDUW $src,$dst\t! MoveF2I" %} 8291 opcode(Assembler::lduw_op3); 8292 ins_encode(simple_form3_mem_reg( src, dst ) ); 8293 ins_pipe(iload_mem); 8294 %} 8295 8296 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 8297 match(Set dst (MoveI2F src)); 8298 effect(DEF dst, USE src); 8299 ins_cost(MEMORY_REF_COST); 8300 8301 size(4); 8302 format %{ "LDF $src,$dst\t! MoveI2F" %} 8303 opcode(Assembler::ldf_op3); 8304 ins_encode(simple_form3_mem_reg(src, dst)); 8305 ins_pipe(floadF_stk); 8306 %} 8307 8308 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{ 8309 match(Set dst (MoveD2L src)); 8310 effect(DEF dst, USE src); 8311 ins_cost(MEMORY_REF_COST); 8312 8313 size(4); 8314 format %{ "LDX $src,$dst\t! 
MoveD2L" %} 8315 opcode(Assembler::ldx_op3); 8316 ins_encode(simple_form3_mem_reg( src, dst ) ); 8317 ins_pipe(iload_mem); 8318 %} 8319 8320 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 8321 match(Set dst (MoveL2D src)); 8322 effect(DEF dst, USE src); 8323 ins_cost(MEMORY_REF_COST); 8324 8325 size(4); 8326 format %{ "LDDF $src,$dst\t! MoveL2D" %} 8327 opcode(Assembler::lddf_op3); 8328 ins_encode(simple_form3_mem_reg(src, dst)); 8329 ins_pipe(floadD_stk); 8330 %} 8331 8332 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 8333 match(Set dst (MoveF2I src)); 8334 effect(DEF dst, USE src); 8335 ins_cost(MEMORY_REF_COST); 8336 8337 size(4); 8338 format %{ "STF $src,$dst\t! MoveF2I" %} 8339 opcode(Assembler::stf_op3); 8340 ins_encode(simple_form3_mem_reg(dst, src)); 8341 ins_pipe(fstoreF_stk_reg); 8342 %} 8343 8344 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ 8345 match(Set dst (MoveI2F src)); 8346 effect(DEF dst, USE src); 8347 ins_cost(MEMORY_REF_COST); 8348 8349 size(4); 8350 format %{ "STW $src,$dst\t! MoveI2F" %} 8351 opcode(Assembler::stw_op3); 8352 ins_encode(simple_form3_mem_reg( dst, src ) ); 8353 ins_pipe(istore_mem_reg); 8354 %} 8355 8356 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 8357 match(Set dst (MoveD2L src)); 8358 effect(DEF dst, USE src); 8359 ins_cost(MEMORY_REF_COST); 8360 8361 size(4); 8362 format %{ "STDF $src,$dst\t! MoveD2L" %} 8363 opcode(Assembler::stdf_op3); 8364 ins_encode(simple_form3_mem_reg(dst, src)); 8365 ins_pipe(fstoreD_stk_reg); 8366 %} 8367 8368 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ 8369 match(Set dst (MoveL2D src)); 8370 effect(DEF dst, USE src); 8371 ins_cost(MEMORY_REF_COST); 8372 8373 size(4); 8374 format %{ "STX $src,$dst\t! 
MoveL2D" %} 8375 opcode(Assembler::stx_op3); 8376 ins_encode(simple_form3_mem_reg( dst, src ) ); 8377 ins_pipe(istore_mem_reg); 8378 %} 8379 8380 8381 //----------Arithmetic Conversion Instructions--------------------------------- 8382 // The conversions operations are all Alpha sorted. Please keep it that way! 8383 8384 instruct convD2F_reg(regF dst, regD src) %{ 8385 match(Set dst (ConvD2F src)); 8386 size(4); 8387 format %{ "FDTOS $src,$dst" %} 8388 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); 8389 ins_encode(form3_opf_rs2D_rdF(src, dst)); 8390 ins_pipe(fcvtD2F); 8391 %} 8392 8393 8394 // Convert a double to an int in a float register. 8395 // If the double is a NAN, stuff a zero in instead. 8396 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ 8397 effect(DEF dst, USE src, KILL fcc0); 8398 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8399 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8400 "FDTOI $src,$dst\t! convert in delay slot\n\t" 8401 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8402 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8403 "skip:" %} 8404 ins_encode(form_d2i_helper(src,dst)); 8405 ins_pipe(fcvtD2I); 8406 %} 8407 8408 instruct convD2I_stk(stackSlotI dst, regD src) %{ 8409 match(Set dst (ConvD2I src)); 8410 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8411 expand %{ 8412 regF tmp; 8413 convD2I_helper(tmp, src); 8414 regF_to_stkI(dst, tmp); 8415 %} 8416 %} 8417 8418 instruct convD2I_reg(iRegI dst, regD src) %{ 8419 predicate(UseVIS >= 3); 8420 match(Set dst (ConvD2I src)); 8421 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8422 expand %{ 8423 regF tmp; 8424 convD2I_helper(tmp, src); 8425 MoveF2I_reg_reg(dst, tmp); 8426 %} 8427 %} 8428 8429 8430 // Convert a double to a long in a double register. 8431 // If the double is a NAN, stuff a zero in instead. 
// Convert a double to a long in a double register.
// A NaN input ends up as zero: the FSUB in the fall-through path
// (taken only when the FCMP is unordered) clears the result.
instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FDTOX $src,$dst\t! convert in delay slot\n\t"
            "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
            "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_d2l_helper(src,dst));
  ins_pipe(fcvtD2L);
%}

instruct convD2L_stk(stackSlotL dst, regD src) %{
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convD2L_helper(tmp, src);
    regD_to_stkL(dst, tmp);
  %}
%}

instruct convD2L_reg(iRegL dst, regD src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convD2L_helper(tmp, src);
    MoveD2L_reg_reg(dst, tmp);
  %}
%}


instruct convF2D_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "FSTOD $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
  ins_encode(form3_opf_rs2F_rdD(src, dst));
  ins_pipe(fcvtF2D);
%}


// Convert a float to an int in a float register.
// If the float is a NAN, stuff a zero in instead.
instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FSTOI $src,$dst\t! convert in delay slot\n\t"
            "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
            "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_f2i_helper(src,dst));
  ins_pipe(fcvtF2I);
%}

instruct convF2I_stk(stackSlotI dst, regF src) %{
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convF2I_helper(tmp, src);
    regF_to_stkI(dst, tmp);
  %}
%}

instruct convF2I_reg(iRegI dst, regF src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convF2I_helper(tmp, src);
    MoveF2I_reg_reg(dst, tmp);
  %}
%}


// Convert a float to a long in a float register.
// If the float is a NAN, stuff a zero in instead.
instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FSTOX $src,$dst\t! convert in delay slot\n\t"
            "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
            "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_f2l_helper(src,dst));
  ins_pipe(fcvtF2L);
%}

instruct convF2L_stk(stackSlotL dst, regF src) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convF2L_helper(tmp, src);
    regD_to_stkL(dst, tmp);
  %}
%}

instruct convF2L_reg(iRegL dst, regF src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convF2L_helper(tmp, src);
    MoveD2L_reg_reg(dst, tmp);
  %}
%}


instruct convI2D_helper(regD dst, regF tmp) %{
  effect(USE tmp, DEF dst);
  format %{ "FITOD $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
  ins_pipe(fcvtI2D);
%}

instruct convI2D_stk(stackSlotI src, regD dst) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

instruct convI2D_reg(regD_low dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2D src));
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

instruct convI2D_mem(regD_low dst, memory mem) %{
  match(Set dst (ConvI2D (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(8);
  format %{ "LDF   $mem,$dst\n\t"
            "FITOD $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}


instruct convI2F_helper(regF dst, regF tmp) %{
  effect(DEF dst, USE tmp);
  format %{ "FITOS $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
  ins_pipe(fcvtI2F);
%}

instruct convI2F_stk(regF dst, stackSlotI src) %{
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp,src);
    convI2F_helper(dst, tmp);
  %}
%}

instruct convI2F_reg(regF dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST);
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2F_helper(dst, tmp);
  %}
%}

instruct convI2F_mem( regF dst, memory mem ) %{
  match(Set dst (ConvI2F (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(8);
  format %{ "LDF   $mem,$dst\n\t"
            "FITOS $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}


instruct convI2L_reg(iRegL dst, iRegI src) %{
  match(Set dst (ConvI2L src));
  size(4);
  format %{ "SRA    $src,0,$dst\t! int->long" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
  match(Set dst (AndL (ConvI2L src) mask) );
  size(4);
  format %{ "SRL    $src,0,$dst\t! zero-extend int to long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend long
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
  match(Set dst (AndL src mask) );
  size(4);
  format %{ "SRL    $src,0,$dst\t! zero-extend long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}


//-----------
// Long to Double conversion using V8 opcodes.
// Still useful because cheetah traps and becomes
// amazingly slow for some common numbers.

// Magic constant, 0x43300000
instruct loadConI_x43300000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI  HI(0x43300000),$dst\t! 2^52" %}
  ins_encode(SetHi22(0x43300000, dst));
  ins_pipe(ialu_none);
%}

// Magic constant, 0x41f00000
instruct loadConI_x41f00000(iRegI dst) %{
  effect(DEF dst);
  size(4);
  format %{ "SETHI  HI(0x41f00000),$dst\t! 2^32" %}
  ins_encode(SetHi22(0x41f00000, dst));
  ins_pipe(ialu_none);
%}

// Construct a double from two float halves
instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(8);
  format %{ "FMOVS  $src1.hi,$dst.hi\n\t"
            "FMOVS  $src2.lo,$dst.lo" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf);
  ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Convert integer in high half of a double register (in the lower half of
// the double register file) to double
instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
  effect(DEF dst, USE src);
  size(4);
  format %{ "FITOD  $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2D_rdD(src, dst));
  ins_pipe(fcvtLHi2D);
%}

// Add float double precision
instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FADDD  $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Sub float double precision
instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FSUBD  $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(faddD_reg_reg);
%}

// Mul float double precision
instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
  effect(DEF dst, USE src1, USE src2);
  size(4);
  format %{ "FMULD  $src1,$src2,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
  ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
  ins_pipe(fmulD_reg_reg);
%}

instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);

  expand %{
    regD_low   tmpsrc;
    iRegI      ix43300000;
    iRegI      ix41f00000;
    stackSlotL lx43300000;
    stackSlotL lx41f00000;
    regD_low   dx43300000;
    regD       dx41f00000;
    regD       tmp1;
    regD_low   tmp2;
    regD       tmp3;
    regD       tmp4;

    stkL_to_regD(tmpsrc, src);

    loadConI_x43300000(ix43300000);
    loadConI_x41f00000(ix41f00000);
    regI_to_stkLHi(lx43300000, ix43300000);
    regI_to_stkLHi(lx41f00000, ix41f00000);
    stkL_to_regD(dx43300000, lx43300000);
    stkL_to_regD(dx41f00000, lx41f00000);

    convI2D_regDHi_regD(tmp1, tmpsrc);
    regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
    subD_regD_regD(tmp3, tmp2, dx43300000);
    mulD_regD_regD(tmp4, tmp1, dx41f00000);
    addD_regD_regD(dst, tmp3, tmp4);
  %}
%}

// Long to Double conversion using fast fxtof
instruct convL2D_helper(regD dst, regD tmp) %{
  effect(DEF dst, USE tmp);
  size(4);
  format %{ "FXTOD  $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf);
  ins_encode(form3_opf_rs2D_rdD(tmp, dst));
  ins_pipe(fcvtL2D);
%}

instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
  predicate(VM_Version::has_fast_fxtof());
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
  expand %{
    regD tmp;
    stkL_to_regD(tmp, src);
    convL2D_helper(dst, tmp);
  %}
%}

instruct convL2D_reg(regD dst, iRegL src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvL2D src));
  expand %{
    regD tmp;
    MoveL2D_reg_reg(tmp, src);
    convL2D_helper(dst, tmp);
  %}
%}

// Long to Float conversion using fast fxtof
instruct convL2F_helper(regF dst, regD tmp) %{
  effect(DEF dst, USE tmp);
  size(4);
  format %{ "FXTOS  $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf);
  ins_encode(form3_opf_rs2D_rdF(tmp, dst));
  ins_pipe(fcvtL2F);
%}

instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
  match(Set dst (ConvL2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regD tmp;
    stkL_to_regD(tmp, src);
    convL2F_helper(dst, tmp);
  %}
%}

instruct convL2F_reg(regF dst, iRegL src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvL2F src));
  ins_cost(DEFAULT_COST);
  expand %{
    regD tmp;
    MoveL2D_reg_reg(tmp, src);
    convL2F_helper(dst, tmp);
  %}
%}

//-----------

instruct convL2I_reg(iRegI dst, iRegL src) %{
  match(Set dst (ConvL2I src));
#ifndef _LP64
  format %{ "MOV    $src.lo,$dst\t! long->int" %}
  ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
  ins_pipe(ialu_move_reg_I_to_L);
#else
  size(4);
  format %{ "SRA    $src,R_G0,$dst\t! long->int" %}
  ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
  ins_pipe(ialu_reg);
#endif
%}

// Register Shift Right Immediate
instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
  match(Set dst (ConvL2I (RShiftL src cnt)));

  size(4);
  format %{ "SRAX   $src,$cnt,$dst" %}
  opcode(Assembler::srax_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
  ins_pipe(ialu_reg_imm);
%}

//----------Control Flow Instructions------------------------------------------
// Compare Instructions
// Compare Integers
instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1, USE op2 );

  size(4);
  format %{ "CMP    $op1,$op2" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! unsigned" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{
  match(Set icc (CmpI op1 op2));
  effect( DEF icc, USE op1 );

  size(4);
  format %{ "CMP    $op1,$op2" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));

  size(4);
  format %{ "BTST   $op2,$op1" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg_zero);
%}

instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{
  match(Set icc (CmpI (AndI op1 op2) zero));

  size(4);
  format %{ "BTST   $op2,$op1" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm_zero);
%}

instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{
  match(Set xcc (CmpL op1 op2));
  effect( DEF xcc, USE op1, USE op2 );

  size(4);
  format %{ "CMP    $op1,$op2\t\t! long" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{
  match(Set xcc (CmpL op1 con));
  effect( DEF xcc, USE op1, USE con );

  size(4);
  format %{ "CMP    $op1,$con\t\t! long" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{
  match(Set xcc (CmpL (AndL op1 op2) zero));
  effect( DEF xcc, USE op1, USE op2 );

  size(4);
  format %{ "BTST   $op1,$op2\t\t! long" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

// useful for checking the alignment of a pointer:
instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{
  match(Set xcc (CmpL (AndL op1 con) zero));
  effect( DEF xcc, USE op1, USE con );

  size(4);
  format %{ "BTST   $op1,$con\t\t! long" %}
  opcode(Assembler::andcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{
  match(Set icc (CmpU op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! unsigned" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

// Compare Pointers
instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{
  match(Set pcc (CmpP op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

// Compare Narrow oops
instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{
  match(Set icc (CmpN op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! compressed ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_reg);
%}

instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{
  match(Set icc (CmpN op1 op2));

  size(4);
  format %{ "CMP    $op1,$op2\t! compressed ptr" %}
  opcode(Assembler::subcc_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
  ins_pipe(ialu_cconly_reg_imm);
%}

//----------Max and Min--------------------------------------------------------
// Min Instructions
// Conditional move for min
instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );

  size(4);
  format %{ "MOVlt  icc,$op1,$op2\t! min" %}
  opcode(Assembler::less);
  ins_encode( enc_cmov_reg_minmax(op2,op1) );
  ins_pipe(ialu_reg_flags);
%}

// Min Register with Register.
instruct minI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MinI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_lt(op2,op1,icc);
  %}
%}

// Max Instructions
// Conditional move for max
instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
  effect( USE_DEF op2, USE op1, USE icc );
  format %{ "MOVgt  icc,$op1,$op2\t! max" %}
  opcode(Assembler::greater);
  ins_encode( enc_cmov_reg_minmax(op2,op1) );
  ins_pipe(ialu_reg_flags);
%}

// Max Register with Register
instruct maxI_eReg(iRegI op1, iRegI op2) %{
  match(Set op2 (MaxI op1 op2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    flagsReg icc;
    compI_iReg(icc,op1,op2);
    cmovI_reg_gt(op2,op1,icc);
  %}
%}


//----------Float Compares----------------------------------------------------
// Compare floating, generate condition code
instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{
  match(Set fcc (CmpF src1 src2));

  size(4);
  format %{ "FCMPs  $fcc,$src1,$src2" %}
  opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf);
  ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) );
  ins_pipe(faddF_fcc_reg_reg_zero);
%}

instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{
  match(Set fcc (CmpD src1 src2));

  size(4);
  format %{ "FCMPd  $fcc,$src1,$src2" %}
  opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf);
  ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) );
  ins_pipe(faddD_fcc_reg_reg_zero);
%}


// Compare floating, generate -1,0,1
instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL fcc0);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
  format %{ "fcmpl  $dst,$src1,$src2" %}
  // Primary = float
  opcode( true );
  ins_encode( floating_cmp( dst, src1, src2 ) );
  ins_pipe( floating_cmp );
%}

instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL fcc0);
  ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
  format %{ "dcmpl  $dst,$src1,$src2" %}
  // Primary = double (not float)
  opcode( false );
  ins_encode( floating_cmp( dst, src1, src2 ) );
  ins_pipe( floating_cmp );
%}

//----------Branches---------------------------------------------------------
// Jump
// (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
  match(Jump switch_val);
  effect(TEMP table);

  ins_cost(350);

  format %{ "ADD    $constanttablebase, $constantoffset, O7\n\t"
            "LD     [O7 + $switch_val], O7\n\t"
            "JUMP   O7" %}
  ins_encode %{
    // Calculate table address into a register.
    Register table_reg;
    Register label_reg = O7;
    // If we are calculating the size of this instruction don't trust
    // zero offsets because they might change when
    // MachConstantBaseNode decides to optimize the constant table
    // base.
    if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
      table_reg = $constanttablebase;
    } else {
      table_reg = O7;
      RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
      __ add($constanttablebase, con_offset, table_reg);
    }

    // Jump to base address + switch value
    __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
    __ jmp(label_reg, G0);
    __ delayed()->nop();
  %}
  ins_pipe(ialu_reg_reg);
%}

// Direct Branch.  Use V8 version with longer range.
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BA     $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    __ ba(*L);
    __ delayed()->nop();
  %}
  ins_pipe(br);
%}

// Direct Branch, short with no delay slot
instruct branch_short(label labl) %{
  match(Goto);
  predicate(UseCBCond);
  effect(USE labl);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "BA     $labl\t! short branch" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ ba_short(*L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Conditional Direct Branch
instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp   $icc,$labl" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}

instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
  match(If cmp icc);
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "BP$cmp  $icc,$labl" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}

instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
  match(If cmp pcc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $pcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(br_cc);
%}

instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
  match(If cmp fcc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "FBP$cmp $fcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(br_fcc);
%}

instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
  match(CountedLoopEnd cmp icc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp   $icc,$labl\t! Loop end" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}

instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
  match(CountedLoopEnd cmp icc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp  $icc,$labl\t! Loop end" %}
  // Prim = bits 24-22, Secnd = bits 31-30
  ins_encode( enc_bp( labl, cmp, icc ) );
  ins_pipe(br_cc);
%}

// Compare and branch instructions
instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! int\n\t"
            "BP$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! int\n\t"
            "BP$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$constant);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_imm);
%}

instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! unsigned\n\t"
            "BP$cmp  $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! unsigned\n\t"
            "BP$cmp  $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$constant);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_imm);
%}

instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl, KILL xcc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! long\n\t"
            "BP$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl, KILL xcc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! long\n\t"
            "BP$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$constant);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_imm);
%}

// Compare Pointers and branch
instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl, KILL pcc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! ptr\n\t"
            "B$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 null));
  effect(USE labl, KILL pcc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,0\t! ptr\n\t"
            "B$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, G0);
    // bpr() is not used here since it has shorter distance.
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,0\t! compressed ptr\n\t"
            "BP$cmp   $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, G0);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

// Loop back branch
instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! int\n\t"
            "BP$cmp   $labl\t! Loop end" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$Register);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_reg);
%}

instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  effect(USE labl, KILL icc);

  size(12);
  ins_cost(BRANCH_COST);
  format %{ "CMP    $op1,$op2\t! int\n\t"
            "BP$cmp   $labl\t! Loop end" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
    __ cmp($op1$$Register, $op2$$constant);
    __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(cmp_br_reg_imm);
%}

// Short compare and branch instructions
instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
  match(If cmp (CmpI op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
  match(If cmp (CmpI op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL xcc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL xcc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Compare Pointers and branch
instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL pcc);

  size(4);
  ins_cost(BRANCH_COST);
#ifdef _LP64
  format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
#else
  format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
#endif
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 null));
  predicate(UseCBCond);
  effect(USE labl, KILL pcc);

  size(4);
  ins_cost(BRANCH_COST);
#ifdef _LP64
  format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
#else
  format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
#endif
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  // Fixed: was "$op1,op2" — missing '$' left the second operand unsubstituted
  // in the debug format output.
  format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 null));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Loop back branch
instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t!
Loop end" %} 9686 ins_encode %{ 9687 Label* L = $labl$$label; 9688 assert(__ use_cbcond(*L), "back to back cbcond"); 9689 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9690 %} 9691 ins_short_branch(1); 9692 ins_avoid_back_to_back(1); 9693 ins_pipe(cbcond_reg_reg); 9694 %} 9695 9696 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9697 match(CountedLoopEnd cmp (CmpI op1 op2)); 9698 predicate(UseCBCond); 9699 effect(USE labl, KILL icc); 9700 9701 size(4); 9702 ins_cost(BRANCH_COST); 9703 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %} 9704 ins_encode %{ 9705 Label* L = $labl$$label; 9706 assert(__ use_cbcond(*L), "back to back cbcond"); 9707 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9708 %} 9709 ins_short_branch(1); 9710 ins_avoid_back_to_back(1); 9711 ins_pipe(cbcond_reg_imm); 9712 %} 9713 9714 // Branch-on-register tests all 64 bits. We assume that values 9715 // in 64-bit registers always remains zero or sign extended 9716 // unless our code munges the high bits. Interrupts can chop 9717 // the high order bits to zero or sign at any time. 
instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
  match(If cmp (CmpI op1 zero));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}

instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
  match(If cmp (CmpP op1 null));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}

instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
  match(If cmp (CmpL op1 zero));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}


// ============================================================================
// Long Compare
//
// Currently we hold longs in 2 registers. Comparing such values efficiently
// is tricky. The flavor of compare used depends on whether we are testing
// for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
// The GE test is the negated LT test. The LE test can be had by commuting
// the operands (yielding a GE test) and then negating; negate again for the
// GT test. The EQ test is done by ORcc'ing the high and low halves, and the
// NE test is negated from that.

// Due to a shortcoming in the ADLC, it mixes up expressions like:
// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
// difference between 'Y' and '0L'. The tree-matches for the CmpI sections
// are collapsed internally in the ADLC's dfa-gen code. The match for
// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
// foo match ends up with the wrong leaf. One fix is to not match both
// reg-reg and reg-zero forms of long-compare. This is unfortunate because
// both forms beat the trinary form of long-compare and both are very useful
// on Intel which has so few registers.

// Branch on a previously computed long condition held in xcc.
instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $xcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    // Backward branch => loop branch => predict taken.
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(br_cc);
%}

// Manifest a CmpL3 result in an integer register. Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
  match(Set dst (CmpL3 src1 src2) );
  effect( KILL ccr );
  ins_cost(6*DEFAULT_COST);
  size(24);
  format %{ "CMP $src1,$src2\t\t! long\n"
            "\tBLT,a,pn done\n"
            "\tMOV -1,$dst\t! delay slot\n"
            "\tBGT,a,pn done\n"
            "\tMOV 1,$dst\t! delay slot\n"
            "\tCLR $dst\n"
            "done:" %}
  ins_encode( cmpl_flag(src1,src2,dst) );
  ins_pipe(cmpL_reg);
%}

// Conditional move
// The cmov*L family conditionally moves based on the long condition codes
// in xcc; dst is both an input and an output (move happens only when the
// condition holds).
instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVS$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x102);
  format %{ "FMOVD$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

// ============================================================================
// Safepoint Instruction
instruct safePoint_poll(iRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  size(4);
#ifdef _LP64
  format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
#else
  format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
#endif
  ins_encode %{
    // Load from the polling page into G0; the relocation lets the VM
    // identify this load as a safepoint poll.
    __ relocate(relocInfo::poll_type);
    __ ld_ptr($poll$$Register, 0, G0);
  %}
  ins_pipe(loadPollP);
%}

// ============================================================================
// Call Instructions
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
  match(CallStaticJava);
  // Plain static calls only; the MethodHandle variant is matched below.
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  size(8);
  ins_cost(CALL_COST);
  format %{ "CALL,static ; NOP ==> " %}
  ins_encode( Java_Static_Call( meth ), call_epilog );
  ins_pipe(simple_call);
%}

// Call Java Static Instruction (method handle version)
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  // L7 is used to save/restore SP around the call.
  effect(USE meth, KILL l7_mh_SP_save);

  size(16);
  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle" %}
  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
  ins_pipe(simple_call);
%}

// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "SET (empty),R_G5\n\t"
            "CALL,dynamic ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth, l7RegP l7) %{
  match(CallRuntime);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog, adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallRuntime
instruct CallLeafDirect(method meth, l7RegP l7) %{
  match(CallLeaf);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallLeaf
instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
  match(CallLeafNoFP);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
  ins_encode(form_jmpl(jump_target));
  ins_pipe(tail_call);
%}


// Return Instruction
instruct Ret() %{
  match(Return);

  // The epilogue node did the ret already.
  size(0);
  format %{ "! return" %}
  ins_encode();
  ins_pipe(empty);
%}


// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "! discard R_O7\n\t"
            "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
  ins_encode(form_jmpl_set_exception_pc(jump_target));
  // opcode(Assembler::jmpl_op3, Assembler::arith_op);
  // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
  // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
  ins_pipe(tail_call);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( o0RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in R_O0; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}


// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "Jmp rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_pipe(tail_call);
%}


// Die now
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ILLTRAP ; ShouldNotReachHere" %}
  ins_encode( form2_illtrap() );
  ins_pipe(tail_call);
%}

// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// not zero for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
  match(Set index (PartialSubtypeCheck sub super));
  effect( KILL pcc, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_pipe(partial_subtype_check_pipe);
%}

instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
  match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
  effect( KILL idx, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_pipe(partial_subtype_check_pipe);
%}


// ============================================================================
// inlined locking and unlocking

instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastLock object box));

  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Lock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}


instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}

// The encodings are generic.
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
  // Used when block zeroing (BIS) is not profitable for this size.
  predicate(!use_block_zeroing(n->in(2)) );
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, KILL ccr);
  ins_cost(300);
  format %{ "MOV $cnt,$temp\n"
            "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
            " BRge loop\t\t! Clearing loop\n"
            " STX G0,[$base+$temp]\t! delay slot" %}

  ins_encode %{
    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
    Register nof_bytes_arg = $cnt$$Register;
    Register nof_bytes_tmp = $temp$$Register;
    Register base_pointer_arg = $base$$Register;

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
    // %%%% this mini-loop must not cross a cache boundary!
  %}
  ins_pipe(long_memory_op);
%}

instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to = $base$$Register;
    Register count = $cnt$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing (temp is not used).
    __ bis_zeroing(to, count, G0, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
  // Variant needed when BlockZeroingLowLimit does not fit in a simm13;
  // a temp register holds the limit.
  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to = $base$$Register;
    Register count = $cnt$$Register;
    Register temp = $tmp$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// All inputs are in fixed registers (see effect) and are killed by the
// encoding; the result lands in an ordinary integer register.
instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
  ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
  ins_pipe(long_memory_op);
%}

instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                       o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
  ins_encode( enc_String_Equals(str1, str2, cnt, result) );
  ins_pipe(long_memory_op);
%}

instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                      o7RegI tmp2, flagsReg ccr) %{
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
  ins_pipe(long_memory_op);
%}


//---------- Zeros Count Instructions ------------------------------------------

instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosI src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // Smear the highest set bit right, then count the ones:
  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // return (WORDBITS - popc(x));
  format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
            "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 32,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srl(Rsrc, 1, Rtmp);
    __ srl(Rsrc, 0, Rdst);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 2, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 4, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 8, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 16, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerInt, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // x |= (x >> 32);
  // return (WORDBITS - popc(x));
  format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
            "OR $src,$tmp,$dst\n\t"
            "SRLX $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,32,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 64,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srlx(Rsrc, 1, Rtmp);
    __ or3( Rsrc, Rtmp, Rdst);
    __ srlx(Rdst, 2, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 4, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 8, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 16, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 32, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerLong, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "SRL $dst,R_G0,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    // Zero-extend to 32 bits before the 64-bit POPC.
    __ srl(Rdst, G0, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}


//---------- Population Count Instructions -------------------------------------

instruct popCountI(iRegIsafe dst, iRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
            "POPC $dst, $dst" %}
  ins_encode %{
    __ srl($src$$Register, G0, $dst$$Register);
    __ popc($dst$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegIsafe dst, iRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "POPC $src, $dst" %}
  ins_encode %{
    __ popc($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}


// ============================================================================
//------------Bytes reverse--------------------------------------------------

// The in-register reverse forms spill the value to a stack slot and reload
// it through a little-endian ASI, letting the memory system do the swap.
instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
  match(Set dst (ReverseBytesL src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesUS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

// Load Integer reversed byte order
instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesI (LoadI src)));

  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(4);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned and reversed
instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesL (LoadL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesUS (LoadUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load short reversed byte order
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesS (LoadS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Store Integer reversed byte order
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreI dst (ReverseBytesI src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store Long reversed byte order
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
  match(Set dst (StoreL dst (ReverseBytesL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store unsigned short/char reversed byte order
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store short reversed byte order
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// ====================VECTOR INSTRUCTIONS=====================================

// Load Aligned Packed values into a Double Register
instruct loadV8(regD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
  ins_encode %{
    __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(floadD_mem);
%}

// Store Vector in Double register to memory
instruct storeV8(memory mem, regD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$mem\t!
store vector (8 bytes)" %} 10598 ins_encode %{ 10599 __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address); 10600 %} 10601 ins_pipe(fstoreD_mem_reg); 10602 %} 10603 10604 // Store Zero into vector in memory 10605 instruct storeV8B_zero(memory mem, immI0 zero) %{ 10606 predicate(n->as_StoreVector()->memory_size() == 8); 10607 match(Set mem (StoreVector mem (ReplicateB zero))); 10608 ins_cost(MEMORY_REF_COST); 10609 size(4); 10610 format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %} 10611 ins_encode %{ 10612 __ stx(G0, $mem$$Address); 10613 %} 10614 ins_pipe(fstoreD_mem_zero); 10615 %} 10616 10617 instruct storeV4S_zero(memory mem, immI0 zero) %{ 10618 predicate(n->as_StoreVector()->memory_size() == 8); 10619 match(Set mem (StoreVector mem (ReplicateS zero))); 10620 ins_cost(MEMORY_REF_COST); 10621 size(4); 10622 format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %} 10623 ins_encode %{ 10624 __ stx(G0, $mem$$Address); 10625 %} 10626 ins_pipe(fstoreD_mem_zero); 10627 %} 10628 10629 instruct storeV2I_zero(memory mem, immI0 zero) %{ 10630 predicate(n->as_StoreVector()->memory_size() == 8); 10631 match(Set mem (StoreVector mem (ReplicateI zero))); 10632 ins_cost(MEMORY_REF_COST); 10633 size(4); 10634 format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %} 10635 ins_encode %{ 10636 __ stx(G0, $mem$$Address); 10637 %} 10638 ins_pipe(fstoreD_mem_zero); 10639 %} 10640 10641 instruct storeV2F_zero(memory mem, immF0 zero) %{ 10642 predicate(n->as_StoreVector()->memory_size() == 8); 10643 match(Set mem (StoreVector mem (ReplicateF zero))); 10644 ins_cost(MEMORY_REF_COST); 10645 size(4); 10646 format %{ "STX $zero,$mem\t! 
store zero vector (2 floats)" %} 10647 ins_encode %{ 10648 __ stx(G0, $mem$$Address); 10649 %} 10650 ins_pipe(fstoreD_mem_zero); 10651 %} 10652 10653 // Replicate scalar to packed byte values into Double register 10654 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10655 predicate(n->as_Vector()->length() == 8 && UseVIS >= 3); 10656 match(Set dst (ReplicateB src)); 10657 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10658 format %{ "SLLX $src,56,$tmp\n\t" 10659 "SRLX $tmp, 8,$tmp2\n\t" 10660 "OR $tmp,$tmp2,$tmp\n\t" 10661 "SRLX $tmp,16,$tmp2\n\t" 10662 "OR $tmp,$tmp2,$tmp\n\t" 10663 "SRLX $tmp,32,$tmp2\n\t" 10664 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10665 "MOVXTOD $tmp,$dst\t! MoveL2D" %} 10666 ins_encode %{ 10667 Register Rsrc = $src$$Register; 10668 Register Rtmp = $tmp$$Register; 10669 Register Rtmp2 = $tmp2$$Register; 10670 __ sllx(Rsrc, 56, Rtmp); 10671 __ srlx(Rtmp, 8, Rtmp2); 10672 __ or3 (Rtmp, Rtmp2, Rtmp); 10673 __ srlx(Rtmp, 16, Rtmp2); 10674 __ or3 (Rtmp, Rtmp2, Rtmp); 10675 __ srlx(Rtmp, 32, Rtmp2); 10676 __ or3 (Rtmp, Rtmp2, Rtmp); 10677 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10678 %} 10679 ins_pipe(ialu_reg); 10680 %} 10681 10682 // Replicate scalar to packed byte values into Double stack 10683 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10684 predicate(n->as_Vector()->length() == 8 && UseVIS < 3); 10685 match(Set dst (ReplicateB src)); 10686 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10687 format %{ "SLLX $src,56,$tmp\n\t" 10688 "SRLX $tmp, 8,$tmp2\n\t" 10689 "OR $tmp,$tmp2,$tmp\n\t" 10690 "SRLX $tmp,16,$tmp2\n\t" 10691 "OR $tmp,$tmp2,$tmp\n\t" 10692 "SRLX $tmp,32,$tmp2\n\t" 10693 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10694 "STX $tmp,$dst\t! 
regL to stkD" %} 10695 ins_encode %{ 10696 Register Rsrc = $src$$Register; 10697 Register Rtmp = $tmp$$Register; 10698 Register Rtmp2 = $tmp2$$Register; 10699 __ sllx(Rsrc, 56, Rtmp); 10700 __ srlx(Rtmp, 8, Rtmp2); 10701 __ or3 (Rtmp, Rtmp2, Rtmp); 10702 __ srlx(Rtmp, 16, Rtmp2); 10703 __ or3 (Rtmp, Rtmp2, Rtmp); 10704 __ srlx(Rtmp, 32, Rtmp2); 10705 __ or3 (Rtmp, Rtmp2, Rtmp); 10706 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10707 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10708 %} 10709 ins_pipe(ialu_reg); 10710 %} 10711 10712 // Replicate scalar constant to packed byte values in Double register 10713 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{ 10714 predicate(n->as_Vector()->length() == 8); 10715 match(Set dst (ReplicateB con)); 10716 effect(KILL tmp); 10717 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %} 10718 ins_encode %{ 10719 // XXX This is a quick fix for 6833573. 10720 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister); 10721 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register); 10722 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10723 %} 10724 ins_pipe(loadConFD); 10725 %} 10726 10727 // Replicate scalar to packed char/short values into Double register 10728 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10729 predicate(n->as_Vector()->length() == 4 && UseVIS >= 3); 10730 match(Set dst (ReplicateS src)); 10731 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10732 format %{ "SLLX $src,48,$tmp\n\t" 10733 "SRLX $tmp,16,$tmp2\n\t" 10734 "OR $tmp,$tmp2,$tmp\n\t" 10735 "SRLX $tmp,32,$tmp2\n\t" 10736 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10737 "MOVXTOD $tmp,$dst\t! 
MoveL2D" %} 10738 ins_encode %{ 10739 Register Rsrc = $src$$Register; 10740 Register Rtmp = $tmp$$Register; 10741 Register Rtmp2 = $tmp2$$Register; 10742 __ sllx(Rsrc, 48, Rtmp); 10743 __ srlx(Rtmp, 16, Rtmp2); 10744 __ or3 (Rtmp, Rtmp2, Rtmp); 10745 __ srlx(Rtmp, 32, Rtmp2); 10746 __ or3 (Rtmp, Rtmp2, Rtmp); 10747 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10748 %} 10749 ins_pipe(ialu_reg); 10750 %} 10751 10752 // Replicate scalar to packed char/short values into Double stack 10753 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10754 predicate(n->as_Vector()->length() == 4 && UseVIS < 3); 10755 match(Set dst (ReplicateS src)); 10756 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10757 format %{ "SLLX $src,48,$tmp\n\t" 10758 "SRLX $tmp,16,$tmp2\n\t" 10759 "OR $tmp,$tmp2,$tmp\n\t" 10760 "SRLX $tmp,32,$tmp2\n\t" 10761 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10762 "STX $tmp,$dst\t! regL to stkD" %} 10763 ins_encode %{ 10764 Register Rsrc = $src$$Register; 10765 Register Rtmp = $tmp$$Register; 10766 Register Rtmp2 = $tmp2$$Register; 10767 __ sllx(Rsrc, 48, Rtmp); 10768 __ srlx(Rtmp, 16, Rtmp2); 10769 __ or3 (Rtmp, Rtmp2, Rtmp); 10770 __ srlx(Rtmp, 32, Rtmp2); 10771 __ or3 (Rtmp, Rtmp2, Rtmp); 10772 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10773 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10774 %} 10775 ins_pipe(ialu_reg); 10776 %} 10777 10778 // Replicate scalar constant to packed char/short values in Double register 10779 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{ 10780 predicate(n->as_Vector()->length() == 4); 10781 match(Set dst (ReplicateS con)); 10782 effect(KILL tmp); 10783 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %} 10784 ins_encode %{ 10785 // XXX This is a quick fix for 6833573. 
10786 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister); 10787 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register); 10788 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10789 %} 10790 ins_pipe(loadConFD); 10791 %} 10792 10793 // Replicate scalar to packed int values into Double register 10794 instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10795 predicate(n->as_Vector()->length() == 2 && UseVIS >= 3); 10796 match(Set dst (ReplicateI src)); 10797 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10798 format %{ "SLLX $src,32,$tmp\n\t" 10799 "SRLX $tmp,32,$tmp2\n\t" 10800 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t" 10801 "MOVXTOD $tmp,$dst\t! MoveL2D" %} 10802 ins_encode %{ 10803 Register Rsrc = $src$$Register; 10804 Register Rtmp = $tmp$$Register; 10805 Register Rtmp2 = $tmp2$$Register; 10806 __ sllx(Rsrc, 32, Rtmp); 10807 __ srlx(Rtmp, 32, Rtmp2); 10808 __ or3 (Rtmp, Rtmp2, Rtmp); 10809 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10810 %} 10811 ins_pipe(ialu_reg); 10812 %} 10813 10814 // Replicate scalar to packed int values into Double stack 10815 instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10816 predicate(n->as_Vector()->length() == 2 && UseVIS < 3); 10817 match(Set dst (ReplicateI src)); 10818 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10819 format %{ "SLLX $src,32,$tmp\n\t" 10820 "SRLX $tmp,32,$tmp2\n\t" 10821 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t" 10822 "STX $tmp,$dst\t! 
regL to stkD" %} 10823 ins_encode %{ 10824 Register Rsrc = $src$$Register; 10825 Register Rtmp = $tmp$$Register; 10826 Register Rtmp2 = $tmp2$$Register; 10827 __ sllx(Rsrc, 32, Rtmp); 10828 __ srlx(Rtmp, 32, Rtmp2); 10829 __ or3 (Rtmp, Rtmp2, Rtmp); 10830 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10831 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10832 %} 10833 ins_pipe(ialu_reg); 10834 %} 10835 10836 // Replicate scalar zero constant to packed int values in Double register 10837 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{ 10838 predicate(n->as_Vector()->length() == 2); 10839 match(Set dst (ReplicateI con)); 10840 effect(KILL tmp); 10841 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %} 10842 ins_encode %{ 10843 // XXX This is a quick fix for 6833573. 10844 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister); 10845 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register); 10846 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10847 %} 10848 ins_pipe(loadConFD); 10849 %} 10850 10851 // Replicate scalar to packed float values into Double stack 10852 instruct Repl2F_stk(stackSlotD dst, regF src) %{ 10853 predicate(n->as_Vector()->length() == 2); 10854 match(Set dst (ReplicateF src)); 10855 ins_cost(MEMORY_REF_COST*2); 10856 format %{ "STF $src,$dst.hi\t! 
packed2F\n\t" 10857 "STF $src,$dst.lo" %} 10858 opcode(Assembler::stf_op3); 10859 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src)); 10860 ins_pipe(fstoreF_stk_reg); 10861 %} 10862 10863 // Replicate scalar zero constant to packed float values in Double register 10864 instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{ 10865 predicate(n->as_Vector()->length() == 2); 10866 match(Set dst (ReplicateF con)); 10867 effect(KILL tmp); 10868 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %} 10869 ins_encode %{ 10870 // XXX This is a quick fix for 6833573. 10871 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister); 10872 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register); 10873 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10874 %} 10875 ins_pipe(loadConFD); 10876 %} 10877 10878 //----------PEEPHOLE RULES----------------------------------------------------- 10879 // These must follow all instruction definitions as they use the names 10880 // defined in the instructions definitions. 10881 // 10882 // peepmatch ( root_instr_name [preceding_instruction]* ); 10883 // 10884 // peepconstraint %{ 10885 // (instruction_number.operand_name relational_op instruction_number.operand_name 10886 // [, ...] 
 );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser. An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == EAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(eRegI dst, eRegI src) %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_eReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace (
incI_eReg_immI1( 0.dst 1.src 0.src ) ); 10931 // %} 10932 // 10933 10934 // // Change load of spilled value to only a spill 10935 // instruct storeI(memory mem, eRegI src) %{ 10936 // match(Set mem (StoreI mem src)); 10937 // %} 10938 // 10939 // instruct loadI(eRegI dst, memory mem) %{ 10940 // match(Set dst (LoadI mem)); 10941 // %} 10942 // 10943 // peephole %{ 10944 // peepmatch ( loadI storeI ); 10945 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); 10946 // peepreplace ( storeI( 1.mem 1.mem 1.src ) ); 10947 // %} 10948 10949 //----------SMARTSPILL RULES--------------------------------------------------- 10950 // These must follow all instruction definitions as they use the names 10951 // defined in the instructions definitions. 10952 // 10953 // SPARC will probably not have any of these rules due to RISC instruction set. 10954 10955 //----------PIPELINE----------------------------------------------------------- 10956 // Rules which define the behavior of the target architectures pipeline.